code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def p_dynamic_class_name_reference(p):
    # PLY parser action.  NOTE: the docstring below is the grammar rule
    # itself (ply.yacc parses it to build the tables), so its text must
    # never be reworded or reformatted as documentation.
    '''dynamic_class_name_reference : base_variable OBJECT_OPERATOR object_property dynamic_class_name_variable_properties
                                    | base_variable'''
    # len(p) == 5 <=> the first alternative matched: a base variable followed
    # by '->property' plus zero or more further property accesses.
    if len(p) == 5:
        # p[3] is a (property name, dimension accesses) pair produced by the
        # object_property sub-rule.
        name, dims = p[3]
        p[0] = ast.ObjectProperty(p[1], name, lineno=p.lineno(2))
        # Each dims entry is a (node class, dimension expression, lineno)
        # triple; wrap the accumulated node with each access, left to right.
        for class_, dim, lineno in dims:
            p[0] = class_(p[0], dim, lineno=lineno)
        # p[4] carries any trailing property accesses; fold each onto the
        # accumulated node the same way.
        for name, dims in p[4]:
            p[0] = ast.ObjectProperty(p[0], name, lineno=p.lineno(2))
            for class_, dim, lineno in dims:
                p[0] = class_(p[0], dim, lineno=lineno)
    else:
        # Bare base_variable alternative: pass the node through unchanged.
        p[0] = p[1] | def function[p_dynamic_class_name_reference, parameter[p]]:
constant[dynamic_class_name_reference : base_variable OBJECT_OPERATOR object_property dynamic_class_name_variable_properties
| base_variable]
if compare[call[name[len], parameter[name[p]]] equal[==] constant[5]] begin[:]
<ast.Tuple object at 0x7da20e9b3be0> assign[=] call[name[p]][constant[3]]
call[name[p]][constant[0]] assign[=] call[name[ast].ObjectProperty, parameter[call[name[p]][constant[1]], name[name]]]
for taget[tuple[[<ast.Name object at 0x7da20e9b05b0>, <ast.Name object at 0x7da20e9b2800>, <ast.Name object at 0x7da20e9b3160>]]] in starred[name[dims]] begin[:]
call[name[p]][constant[0]] assign[=] call[name[class_], parameter[call[name[p]][constant[0]], name[dim]]]
for taget[tuple[[<ast.Name object at 0x7da1b0b39810>, <ast.Name object at 0x7da1b0b3b820>]]] in starred[call[name[p]][constant[4]]] begin[:]
call[name[p]][constant[0]] assign[=] call[name[ast].ObjectProperty, parameter[call[name[p]][constant[0]], name[name]]]
for taget[tuple[[<ast.Name object at 0x7da207f98070>, <ast.Name object at 0x7da207f9a170>, <ast.Name object at 0x7da207f9abc0>]]] in starred[name[dims]] begin[:]
call[name[p]][constant[0]] assign[=] call[name[class_], parameter[call[name[p]][constant[0]], name[dim]]] | keyword[def] identifier[p_dynamic_class_name_reference] ( identifier[p] ):
literal[string]
keyword[if] identifier[len] ( identifier[p] )== literal[int] :
identifier[name] , identifier[dims] = identifier[p] [ literal[int] ]
identifier[p] [ literal[int] ]= identifier[ast] . identifier[ObjectProperty] ( identifier[p] [ literal[int] ], identifier[name] , identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] ))
keyword[for] identifier[class_] , identifier[dim] , identifier[lineno] keyword[in] identifier[dims] :
identifier[p] [ literal[int] ]= identifier[class_] ( identifier[p] [ literal[int] ], identifier[dim] , identifier[lineno] = identifier[lineno] )
keyword[for] identifier[name] , identifier[dims] keyword[in] identifier[p] [ literal[int] ]:
identifier[p] [ literal[int] ]= identifier[ast] . identifier[ObjectProperty] ( identifier[p] [ literal[int] ], identifier[name] , identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] ))
keyword[for] identifier[class_] , identifier[dim] , identifier[lineno] keyword[in] identifier[dims] :
identifier[p] [ literal[int] ]= identifier[class_] ( identifier[p] [ literal[int] ], identifier[dim] , identifier[lineno] = identifier[lineno] )
keyword[else] :
identifier[p] [ literal[int] ]= identifier[p] [ literal[int] ] | def p_dynamic_class_name_reference(p):
"""dynamic_class_name_reference : base_variable OBJECT_OPERATOR object_property dynamic_class_name_variable_properties
| base_variable"""
if len(p) == 5:
(name, dims) = p[3]
p[0] = ast.ObjectProperty(p[1], name, lineno=p.lineno(2))
for (class_, dim, lineno) in dims:
p[0] = class_(p[0], dim, lineno=lineno) # depends on [control=['for'], data=[]]
for (name, dims) in p[4]:
p[0] = ast.ObjectProperty(p[0], name, lineno=p.lineno(2))
for (class_, dim, lineno) in dims:
p[0] = class_(p[0], dim, lineno=lineno) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
p[0] = p[1] |
def insert(self, table, data_list, return_cols='id'):
    """
    Bulk-insert rows into ``table`` with a single multi-row INSERT.

    A single ``INSERT ... VALUES (...), (...)`` statement is much faster
    (~2x in tests with 10k & 100k rows and n cols) for inserting data than
    ``executemany()``.
    TODO: Is there a limit of length the query can be? If so handle it.

    Args:
        table (str): Name of the table to insert into.
        data_list (dict or list of dict): Row(s) to insert. Every dict must
            provide the keys of the first dict; column order is taken from
            the first row.
        return_cols (str or list or None): Column name(s) for a RETURNING
            clause, or None/empty to skip RETURNING.

    Returns:
        list: Rows produced by RETURNING, ``[]`` for empty input, or None
        when the statement produced nothing to fetch.

    Raises:
        Exception: Re-raises any database error after logging it.
    """
    data_list = copy.deepcopy(data_list)  # Create deepcopy so the original list does not get modified
    # Make sure that `data_list` is a list
    if not isinstance(data_list, list):
        data_list = [data_list]
    # Make sure data_list has content
    if len(data_list) == 0:
        # No need to continue
        return []
    # Data in the list must be dicts (just check the first one)
    if not isinstance(data_list[0], dict):
        logger.critical("Data must be a list of dicts")
        # Do not return here, let the exception handle the error that will be thrown when the query runs
    # Normalize return_cols into a RETURNING clause (or an empty string)
    if return_cols is None or len(return_cols) == 0 or return_cols[0] is None:
        return_cols = ''
    elif not isinstance(return_cols, list):
        return_cols = [return_cols]
    if len(return_cols) > 0:
        return_cols = 'RETURNING ' + ','.join(return_cols)
    try:
        with self.getcursor() as cur:
            # Column order is fixed by the first row so every value tuple
            # lines up with the field list even when later dicts were built
            # with a different key insertion order.
            columns = list(data_list[0].keys())
            query = "INSERT INTO {table} ({fields}) VALUES {values} {return_cols}"\
                .format(table=table,
                        fields='"{0}"'.format('", "'.join(columns)),
                        values=','.join(['%s'] * len(data_list)),
                        return_cols=return_cols,
                        )
            values = []
            for row in data_list:
                # BUGFIX: previously built each tuple from row.values(),
                # which silently misaligned values whenever a row's key
                # order differed from the first row's. Indexing by the
                # shared column list guarantees alignment (and raises
                # KeyError on a missing column instead of inserting
                # shifted data).
                values.append(_check_values(tuple(row[col] for col in columns)))
            query = cur.mogrify(query, values)
            cur.execute(query)
            try:
                return cur.fetchall()
            except Exception:
                # No result set to fetch (statement had no RETURNING clause)
                return None
    except Exception as e:
        logger.exception("Error inserting data")
        logger.debug("Error inserting data: {data}".format(data=data_list))
        raise e.with_traceback(sys.exc_info()[2])
constant[
Create a bulk insert statement which is much faster (~2x in tests with 10k & 100k rows and n cols)
for inserting data then executemany()
TODO: Is there a limit of length the query can be? If so handle it.
]
variable[data_list] assign[=] call[name[copy].deepcopy, parameter[name[data_list]]]
if <ast.UnaryOp object at 0x7da1b28efc70> begin[:]
variable[data_list] assign[=] list[[<ast.Name object at 0x7da1b28efaf0>]]
if compare[call[name[len], parameter[name[data_list]]] equal[==] constant[0]] begin[:]
return[list[[]]]
if <ast.UnaryOp object at 0x7da1b28c0370> begin[:]
call[name[logger].critical, parameter[constant[Data must be a list of dicts]]]
if <ast.BoolOp object at 0x7da1b28ef8e0> begin[:]
variable[return_cols] assign[=] constant[]
if compare[call[name[len], parameter[name[return_cols]]] greater[>] constant[0]] begin[:]
variable[return_cols] assign[=] binary_operation[constant[RETURNING ] + call[constant[,].join, parameter[name[return_cols]]]]
<ast.Try object at 0x7da1b28ef130> | keyword[def] identifier[insert] ( identifier[self] , identifier[table] , identifier[data_list] , identifier[return_cols] = literal[string] ):
literal[string]
identifier[data_list] = identifier[copy] . identifier[deepcopy] ( identifier[data_list] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[data_list] , identifier[list] ):
identifier[data_list] =[ identifier[data_list] ]
keyword[if] identifier[len] ( identifier[data_list] )== literal[int] :
keyword[return] []
keyword[if] keyword[not] identifier[isinstance] ( identifier[data_list] [ literal[int] ], identifier[dict] ):
identifier[logger] . identifier[critical] ( literal[string] )
keyword[if] identifier[return_cols] keyword[is] keyword[None] keyword[or] identifier[len] ( identifier[return_cols] )== literal[int] keyword[or] identifier[return_cols] [ literal[int] ] keyword[is] keyword[None] :
identifier[return_cols] = literal[string]
keyword[elif] keyword[not] identifier[isinstance] ( identifier[return_cols] , identifier[list] ):
identifier[return_cols] =[ identifier[return_cols] ]
keyword[if] identifier[len] ( identifier[return_cols] )> literal[int] :
identifier[return_cols] = literal[string] + literal[string] . identifier[join] ( identifier[return_cols] )
keyword[try] :
keyword[with] identifier[self] . identifier[getcursor] () keyword[as] identifier[cur] :
identifier[query] = literal[string] . identifier[format] ( identifier[table] = identifier[table] ,
identifier[fields] = literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[data_list] [ literal[int] ]. identifier[keys] ())),
identifier[values] = literal[string] . identifier[join] ([ literal[string] ]* identifier[len] ( identifier[data_list] )),
identifier[return_cols] = identifier[return_cols] ,
)
identifier[values] =[]
keyword[for] identifier[row] keyword[in] [ identifier[tuple] ( identifier[v] . identifier[values] ()) keyword[for] identifier[v] keyword[in] identifier[data_list] ]:
identifier[values] . identifier[append] ( identifier[_check_values] ( identifier[row] ))
identifier[query] = identifier[cur] . identifier[mogrify] ( identifier[query] , identifier[values] )
identifier[cur] . identifier[execute] ( identifier[query] )
keyword[try] :
keyword[return] identifier[cur] . identifier[fetchall] ()
keyword[except] identifier[Exception] :
keyword[return] keyword[None]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[exception] ( literal[string] )
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[data] = identifier[data_list] ))
keyword[raise] identifier[e] . identifier[with_traceback] ( identifier[sys] . identifier[exc_info] ()[ literal[int] ]) | def insert(self, table, data_list, return_cols='id'):
"""
Create a bulk insert statement which is much faster (~2x in tests with 10k & 100k rows and n cols)
for inserting data then executemany()
TODO: Is there a limit of length the query can be? If so handle it.
"""
data_list = copy.deepcopy(data_list) # Create deepcopy so the original list does not get modified
# Make sure that `data_list` is a list
if not isinstance(data_list, list):
data_list = [data_list] # depends on [control=['if'], data=[]]
# Make sure data_list has content
if len(data_list) == 0:
# No need to continue
return [] # depends on [control=['if'], data=[]]
# Data in the list must be dicts (just check the first one)
if not isinstance(data_list[0], dict):
logger.critical('Data must be a list of dicts') # depends on [control=['if'], data=[]]
# Do not return here, let the exception handle the error that will be thrown when the query runs
# Make sure return_cols is a list
if return_cols is None or len(return_cols) == 0 or return_cols[0] is None:
return_cols = '' # depends on [control=['if'], data=[]]
elif not isinstance(return_cols, list):
return_cols = [return_cols] # depends on [control=['if'], data=[]]
if len(return_cols) > 0:
return_cols = 'RETURNING ' + ','.join(return_cols) # depends on [control=['if'], data=[]]
try:
with self.getcursor() as cur:
query = 'INSERT INTO {table} ({fields}) VALUES {values} {return_cols}'.format(table=table, fields='"{0}"'.format('", "'.join(data_list[0].keys())), values=','.join(['%s'] * len(data_list)), return_cols=return_cols)
values = []
for row in [tuple(v.values()) for v in data_list]:
values.append(_check_values(row)) # depends on [control=['for'], data=['row']]
query = cur.mogrify(query, values)
cur.execute(query)
try:
return cur.fetchall() # depends on [control=['try'], data=[]]
except Exception:
return None # depends on [control=['except'], data=[]] # depends on [control=['with'], data=['cur']] # depends on [control=['try'], data=[]]
except Exception as e:
logger.exception('Error inserting data')
logger.debug('Error inserting data: {data}'.format(data=data_list))
raise e.with_traceback(sys.exc_info()[2]) # depends on [control=['except'], data=['e']] |
def _get_shallow_site_response_term(self, C, vs30, pga_rock):
"""
Returns the shallow site response term defined in equations 17, 18 and
19
"""
vs_mod = vs30 / C["k1"]
# Get linear global site response term
f_site_g = C["c11"] * np.log(vs_mod)
idx = vs30 > C["k1"]
f_site_g[idx] = f_site_g[idx] + (C["k2"] * self.CONSTS["n"] *
np.log(vs_mod[idx]))
# Get nonlinear site response term
idx = np.logical_not(idx)
if np.any(idx):
f_site_g[idx] = f_site_g[idx] + C["k2"] * (
np.log(pga_rock[idx] +
self.CONSTS["c"] * (vs_mod[idx] ** self.CONSTS["n"])) -
np.log(pga_rock[idx] + self.CONSTS["c"])
)
# For Japan sites (SJ = 1) further scaling is needed (equation 19)
if self.CONSTS["SJ"]:
fsite_j = np.log(vs_mod)
idx = vs30 > 200.0
if np.any(idx):
fsite_j[idx] = (C["c13"] + C["k2"] * self.CONSTS["n"]) *\
fsite_j[idx]
idx = np.logical_not(idx)
if np.any(idx):
fsite_j[idx] = (C["c12"] + C["k2"] * self.CONSTS["n"]) *\
(fsite_j[idx] - np.log(200.0 / C["k1"]))
return f_site_g + fsite_j
else:
return f_site_g | def function[_get_shallow_site_response_term, parameter[self, C, vs30, pga_rock]]:
constant[
Returns the shallow site response term defined in equations 17, 18 and
19
]
variable[vs_mod] assign[=] binary_operation[name[vs30] / call[name[C]][constant[k1]]]
variable[f_site_g] assign[=] binary_operation[call[name[C]][constant[c11]] * call[name[np].log, parameter[name[vs_mod]]]]
variable[idx] assign[=] compare[name[vs30] greater[>] call[name[C]][constant[k1]]]
call[name[f_site_g]][name[idx]] assign[=] binary_operation[call[name[f_site_g]][name[idx]] + binary_operation[binary_operation[call[name[C]][constant[k2]] * call[name[self].CONSTS][constant[n]]] * call[name[np].log, parameter[call[name[vs_mod]][name[idx]]]]]]
variable[idx] assign[=] call[name[np].logical_not, parameter[name[idx]]]
if call[name[np].any, parameter[name[idx]]] begin[:]
call[name[f_site_g]][name[idx]] assign[=] binary_operation[call[name[f_site_g]][name[idx]] + binary_operation[call[name[C]][constant[k2]] * binary_operation[call[name[np].log, parameter[binary_operation[call[name[pga_rock]][name[idx]] + binary_operation[call[name[self].CONSTS][constant[c]] * binary_operation[call[name[vs_mod]][name[idx]] ** call[name[self].CONSTS][constant[n]]]]]]] - call[name[np].log, parameter[binary_operation[call[name[pga_rock]][name[idx]] + call[name[self].CONSTS][constant[c]]]]]]]]
if call[name[self].CONSTS][constant[SJ]] begin[:]
variable[fsite_j] assign[=] call[name[np].log, parameter[name[vs_mod]]]
variable[idx] assign[=] compare[name[vs30] greater[>] constant[200.0]]
if call[name[np].any, parameter[name[idx]]] begin[:]
call[name[fsite_j]][name[idx]] assign[=] binary_operation[binary_operation[call[name[C]][constant[c13]] + binary_operation[call[name[C]][constant[k2]] * call[name[self].CONSTS][constant[n]]]] * call[name[fsite_j]][name[idx]]]
variable[idx] assign[=] call[name[np].logical_not, parameter[name[idx]]]
if call[name[np].any, parameter[name[idx]]] begin[:]
call[name[fsite_j]][name[idx]] assign[=] binary_operation[binary_operation[call[name[C]][constant[c12]] + binary_operation[call[name[C]][constant[k2]] * call[name[self].CONSTS][constant[n]]]] * binary_operation[call[name[fsite_j]][name[idx]] - call[name[np].log, parameter[binary_operation[constant[200.0] / call[name[C]][constant[k1]]]]]]]
return[binary_operation[name[f_site_g] + name[fsite_j]]] | keyword[def] identifier[_get_shallow_site_response_term] ( identifier[self] , identifier[C] , identifier[vs30] , identifier[pga_rock] ):
literal[string]
identifier[vs_mod] = identifier[vs30] / identifier[C] [ literal[string] ]
identifier[f_site_g] = identifier[C] [ literal[string] ]* identifier[np] . identifier[log] ( identifier[vs_mod] )
identifier[idx] = identifier[vs30] > identifier[C] [ literal[string] ]
identifier[f_site_g] [ identifier[idx] ]= identifier[f_site_g] [ identifier[idx] ]+( identifier[C] [ literal[string] ]* identifier[self] . identifier[CONSTS] [ literal[string] ]*
identifier[np] . identifier[log] ( identifier[vs_mod] [ identifier[idx] ]))
identifier[idx] = identifier[np] . identifier[logical_not] ( identifier[idx] )
keyword[if] identifier[np] . identifier[any] ( identifier[idx] ):
identifier[f_site_g] [ identifier[idx] ]= identifier[f_site_g] [ identifier[idx] ]+ identifier[C] [ literal[string] ]*(
identifier[np] . identifier[log] ( identifier[pga_rock] [ identifier[idx] ]+
identifier[self] . identifier[CONSTS] [ literal[string] ]*( identifier[vs_mod] [ identifier[idx] ]** identifier[self] . identifier[CONSTS] [ literal[string] ]))-
identifier[np] . identifier[log] ( identifier[pga_rock] [ identifier[idx] ]+ identifier[self] . identifier[CONSTS] [ literal[string] ])
)
keyword[if] identifier[self] . identifier[CONSTS] [ literal[string] ]:
identifier[fsite_j] = identifier[np] . identifier[log] ( identifier[vs_mod] )
identifier[idx] = identifier[vs30] > literal[int]
keyword[if] identifier[np] . identifier[any] ( identifier[idx] ):
identifier[fsite_j] [ identifier[idx] ]=( identifier[C] [ literal[string] ]+ identifier[C] [ literal[string] ]* identifier[self] . identifier[CONSTS] [ literal[string] ])* identifier[fsite_j] [ identifier[idx] ]
identifier[idx] = identifier[np] . identifier[logical_not] ( identifier[idx] )
keyword[if] identifier[np] . identifier[any] ( identifier[idx] ):
identifier[fsite_j] [ identifier[idx] ]=( identifier[C] [ literal[string] ]+ identifier[C] [ literal[string] ]* identifier[self] . identifier[CONSTS] [ literal[string] ])*( identifier[fsite_j] [ identifier[idx] ]- identifier[np] . identifier[log] ( literal[int] / identifier[C] [ literal[string] ]))
keyword[return] identifier[f_site_g] + identifier[fsite_j]
keyword[else] :
keyword[return] identifier[f_site_g] | def _get_shallow_site_response_term(self, C, vs30, pga_rock):
"""
Returns the shallow site response term defined in equations 17, 18 and
19
"""
vs_mod = vs30 / C['k1']
# Get linear global site response term
f_site_g = C['c11'] * np.log(vs_mod)
idx = vs30 > C['k1']
f_site_g[idx] = f_site_g[idx] + C['k2'] * self.CONSTS['n'] * np.log(vs_mod[idx])
# Get nonlinear site response term
idx = np.logical_not(idx)
if np.any(idx):
f_site_g[idx] = f_site_g[idx] + C['k2'] * (np.log(pga_rock[idx] + self.CONSTS['c'] * vs_mod[idx] ** self.CONSTS['n']) - np.log(pga_rock[idx] + self.CONSTS['c'])) # depends on [control=['if'], data=[]]
# For Japan sites (SJ = 1) further scaling is needed (equation 19)
if self.CONSTS['SJ']:
fsite_j = np.log(vs_mod)
idx = vs30 > 200.0
if np.any(idx):
fsite_j[idx] = (C['c13'] + C['k2'] * self.CONSTS['n']) * fsite_j[idx] # depends on [control=['if'], data=[]]
idx = np.logical_not(idx)
if np.any(idx):
fsite_j[idx] = (C['c12'] + C['k2'] * self.CONSTS['n']) * (fsite_j[idx] - np.log(200.0 / C['k1'])) # depends on [control=['if'], data=[]]
return f_site_g + fsite_j # depends on [control=['if'], data=[]]
else:
return f_site_g |
def get_units_from_category(self, category) -> typing.Iterator['BaseUnit']:
    """
    Yield every unit of this coalition that belongs to ``category``.

    Args:
        category: unit category
    Returns: generator over all units of a specific category in this coalition
    """
    Mission.validator_group_category.validate(category, 'group category')
    yield from (unit for unit in self.units
                if unit.group_category == category)
literal[string]
identifier[Mission] . identifier[validator_group_category] . identifier[validate] ( identifier[category] , literal[string] )
keyword[for] identifier[unit] keyword[in] identifier[self] . identifier[units] :
keyword[if] identifier[unit] . identifier[group_category] == identifier[category] :
keyword[yield] identifier[unit] | def get_units_from_category(self, category) -> typing.Iterator['BaseUnit']:
"""
Args:
category: unit category
Returns: generator over all units of a specific category in this coalition
"""
Mission.validator_group_category.validate(category, 'group category')
for unit in self.units:
if unit.group_category == category:
yield unit # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['unit']] |
def hydrophobic_atoms(self, all_atoms):
    """Select all carbon atoms which have only carbons and/or hydrogens as direct neighbors."""
    data = namedtuple('hydrophobic', 'atom orig_atom orig_idx')
    # First pass: carbon atoms bonded exclusively to hydrogen and/or carbon.
    carbons = []
    for candidate in all_atoms:
        if candidate.atomicnum != 6:
            continue
        neighbour_elements = {natom.GetAtomicNum()
                              for natom in pybel.ob.OBAtomAtomIter(candidate.OBAtom)}
        if neighbour_elements.issubset({1, 6}):
            carbons.append(candidate)
    # Second pass: map back to original indices/atoms, dropping atoms that
    # belong to alternative conformations.
    atom_set = []
    for atom in carbons:
        orig_idx = self.Mapper.mapid(atom.idx, mtype=self.mtype, bsid=self.bsid)
        orig_atom = self.Mapper.id_to_atom(orig_idx)
        if atom.idx not in self.altconf:
            atom_set.append(data(atom=atom, orig_atom=orig_atom, orig_idx=orig_idx))
    return atom_set
constant[Select all carbon atoms which have only carbons and/or hydrogens as direct neighbors.]
variable[atom_set] assign[=] list[[]]
variable[data] assign[=] call[name[namedtuple], parameter[constant[hydrophobic], constant[atom orig_atom orig_idx]]]
variable[atm] assign[=] <ast.ListComp object at 0x7da18bcc8eb0>
for taget[name[atom]] in starred[name[atm]] begin[:]
variable[orig_idx] assign[=] call[name[self].Mapper.mapid, parameter[name[atom].idx]]
variable[orig_atom] assign[=] call[name[self].Mapper.id_to_atom, parameter[name[orig_idx]]]
if compare[name[atom].idx <ast.NotIn object at 0x7da2590d7190> name[self].altconf] begin[:]
call[name[atom_set].append, parameter[call[name[data], parameter[]]]]
return[name[atom_set]] | keyword[def] identifier[hydrophobic_atoms] ( identifier[self] , identifier[all_atoms] ):
literal[string]
identifier[atom_set] =[]
identifier[data] = identifier[namedtuple] ( literal[string] , literal[string] )
identifier[atm] =[ identifier[a] keyword[for] identifier[a] keyword[in] identifier[all_atoms] keyword[if] identifier[a] . identifier[atomicnum] == literal[int] keyword[and] identifier[set] ([ identifier[natom] . identifier[GetAtomicNum] () keyword[for] identifier[natom]
keyword[in] identifier[pybel] . identifier[ob] . identifier[OBAtomAtomIter] ( identifier[a] . identifier[OBAtom] )]). identifier[issubset] (
{ literal[int] , literal[int] })]
keyword[for] identifier[atom] keyword[in] identifier[atm] :
identifier[orig_idx] = identifier[self] . identifier[Mapper] . identifier[mapid] ( identifier[atom] . identifier[idx] , identifier[mtype] = identifier[self] . identifier[mtype] , identifier[bsid] = identifier[self] . identifier[bsid] )
identifier[orig_atom] = identifier[self] . identifier[Mapper] . identifier[id_to_atom] ( identifier[orig_idx] )
keyword[if] identifier[atom] . identifier[idx] keyword[not] keyword[in] identifier[self] . identifier[altconf] :
identifier[atom_set] . identifier[append] ( identifier[data] ( identifier[atom] = identifier[atom] , identifier[orig_atom] = identifier[orig_atom] , identifier[orig_idx] = identifier[orig_idx] ))
keyword[return] identifier[atom_set] | def hydrophobic_atoms(self, all_atoms):
"""Select all carbon atoms which have only carbons and/or hydrogens as direct neighbors."""
atom_set = []
data = namedtuple('hydrophobic', 'atom orig_atom orig_idx')
atm = [a for a in all_atoms if a.atomicnum == 6 and set([natom.GetAtomicNum() for natom in pybel.ob.OBAtomAtomIter(a.OBAtom)]).issubset({1, 6})]
for atom in atm:
orig_idx = self.Mapper.mapid(atom.idx, mtype=self.mtype, bsid=self.bsid)
orig_atom = self.Mapper.id_to_atom(orig_idx)
if atom.idx not in self.altconf:
atom_set.append(data(atom=atom, orig_atom=orig_atom, orig_idx=orig_idx)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['atom']]
return atom_set |
def MakeRequest(http, http_request, retries=7, max_retry_wait=60,
                redirections=5,
                retry_func=HandleExceptionsAndRebuildHttpConnections,
                check_response_func=CheckResponse):
    """Send http_request via the given http, performing error/retry handling.
    Args:
      http: An httplib2.Http instance, or a http multiplexer that delegates to
          an underlying http, for example, HTTPMultiplexer.
      http_request: A Request to send.
      retries: (int, default 7) Number of retries to attempt on retryable
          replies (such as 429 or 5XX).
      max_retry_wait: (int, default 60) Maximum number of seconds to wait
          when retrying.
      redirections: (int, default 5) Number of redirects to follow.
      retry_func: Function to handle retries on exceptions. Argument is an
          ExceptionRetryArgs tuple.
      check_response_func: Function to validate the HTTP response.
          Arguments are (Response, response content, url).
    Raises:
      InvalidDataFromServerError: if there is no response after retries.
    Returns:
      A Response object.
    """
    first_req_time = time.time()
    attempt = 0
    while True:
        try:
            return _MakeRequestNoRetry(
                http, http_request, redirections=redirections,
                check_response_func=check_response_func)
        # retry_func consumes the exception types it can handle and raises
        # everything else.
        # pylint: disable=broad-except
        except Exception as e:
            attempt += 1
            # Out of retries: let the last exception propagate.
            if attempt >= retries:
                raise
            elapsed = time.time() - first_req_time
            retry_func(ExceptionRetryArgs(http, http_request, e, attempt,
                                          max_retry_wait, elapsed))
constant[Send http_request via the given http, performing error/retry handling.
Args:
http: An httplib2.Http instance, or a http multiplexer that delegates to
an underlying http, for example, HTTPMultiplexer.
http_request: A Request to send.
retries: (int, default 7) Number of retries to attempt on retryable
replies (such as 429 or 5XX).
max_retry_wait: (int, default 60) Maximum number of seconds to wait
when retrying.
redirections: (int, default 5) Number of redirects to follow.
retry_func: Function to handle retries on exceptions. Argument is an
ExceptionRetryArgs tuple.
check_response_func: Function to validate the HTTP response.
Arguments are (Response, response content, url).
Raises:
InvalidDataFromServerError: if there is no response after retries.
Returns:
A Response object.
]
variable[retry] assign[=] constant[0]
variable[first_req_time] assign[=] call[name[time].time, parameter[]]
while constant[True] begin[:]
<ast.Try object at 0x7da1b07f6f80> | keyword[def] identifier[MakeRequest] ( identifier[http] , identifier[http_request] , identifier[retries] = literal[int] , identifier[max_retry_wait] = literal[int] ,
identifier[redirections] = literal[int] ,
identifier[retry_func] = identifier[HandleExceptionsAndRebuildHttpConnections] ,
identifier[check_response_func] = identifier[CheckResponse] ):
literal[string]
identifier[retry] = literal[int]
identifier[first_req_time] = identifier[time] . identifier[time] ()
keyword[while] keyword[True] :
keyword[try] :
keyword[return] identifier[_MakeRequestNoRetry] (
identifier[http] , identifier[http_request] , identifier[redirections] = identifier[redirections] ,
identifier[check_response_func] = identifier[check_response_func] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[retry] += literal[int]
keyword[if] identifier[retry] >= identifier[retries] :
keyword[raise]
keyword[else] :
identifier[total_wait_sec] = identifier[time] . identifier[time] ()- identifier[first_req_time]
identifier[retry_func] ( identifier[ExceptionRetryArgs] ( identifier[http] , identifier[http_request] , identifier[e] , identifier[retry] ,
identifier[max_retry_wait] , identifier[total_wait_sec] )) | def MakeRequest(http, http_request, retries=7, max_retry_wait=60, redirections=5, retry_func=HandleExceptionsAndRebuildHttpConnections, check_response_func=CheckResponse):
"""Send http_request via the given http, performing error/retry handling.
Args:
http: An httplib2.Http instance, or a http multiplexer that delegates to
an underlying http, for example, HTTPMultiplexer.
http_request: A Request to send.
retries: (int, default 7) Number of retries to attempt on retryable
replies (such as 429 or 5XX).
max_retry_wait: (int, default 60) Maximum number of seconds to wait
when retrying.
redirections: (int, default 5) Number of redirects to follow.
retry_func: Function to handle retries on exceptions. Argument is an
ExceptionRetryArgs tuple.
check_response_func: Function to validate the HTTP response.
Arguments are (Response, response content, url).
Raises:
InvalidDataFromServerError: if there is no response after retries.
Returns:
A Response object.
"""
retry = 0
first_req_time = time.time()
while True:
try:
return _MakeRequestNoRetry(http, http_request, redirections=redirections, check_response_func=check_response_func) # depends on [control=['try'], data=[]]
# retry_func will consume the exception types it handles and raise.
# pylint: disable=broad-except
except Exception as e:
retry += 1
if retry >= retries:
raise # depends on [control=['if'], data=[]]
else:
total_wait_sec = time.time() - first_req_time
retry_func(ExceptionRetryArgs(http, http_request, e, retry, max_retry_wait, total_wait_sec)) # depends on [control=['except'], data=['e']] # depends on [control=['while'], data=[]] |
def decompress(content, encoding, filename='N/A'):
    """
    Decompress file content.
    Required:
        content (bytes): a file to be compressed
        encoding: None (no compression) or 'gzip'
    Optional:
        filename (str:default:'N/A'): Used for debugging messages
    Raises:
        NotImplementedError if an unsupported codec is specified.
        compression.EncodeError if the encoder has an issue
    Return: decompressed content
    """
    # Normalize: treat None/'' as "no compression", compare case-insensitively.
    codec = (encoding or '').lower()
    try:
        if codec == '':
            return content
        if codec == 'gzip':
            return gunzip(content)
    except DecompressionError:
        # Name the offending file before letting the error propagate.
        print("Filename: " + str(filename))
        raise
    # Fell through every supported codec: reject the request.
    raise NotImplementedError(str(codec) + ' is not currently supported. Supported Options: None, gzip')
constant[
Decompress file content.
Required:
content (bytes): a file to be compressed
encoding: None (no compression) or 'gzip'
Optional:
filename (str:default:'N/A'): Used for debugging messages
Raises:
NotImplementedError if an unsupported codec is specified.
compression.EncodeError if the encoder has an issue
Return: decompressed content
]
<ast.Try object at 0x7da1b0b10070>
<ast.Raise object at 0x7da20c7968f0> | keyword[def] identifier[decompress] ( identifier[content] , identifier[encoding] , identifier[filename] = literal[string] ):
literal[string]
keyword[try] :
identifier[encoding] =( identifier[encoding] keyword[or] literal[string] ). identifier[lower] ()
keyword[if] identifier[encoding] == literal[string] :
keyword[return] identifier[content]
keyword[elif] identifier[encoding] == literal[string] :
keyword[return] identifier[gunzip] ( identifier[content] )
keyword[except] identifier[DecompressionError] keyword[as] identifier[err] :
identifier[print] ( literal[string] + identifier[str] ( identifier[filename] ))
keyword[raise]
keyword[raise] identifier[NotImplementedError] ( identifier[str] ( identifier[encoding] )+ literal[string] ) | def decompress(content, encoding, filename='N/A'):
"""
Decompress file content.
Required:
content (bytes): a file to be compressed
encoding: None (no compression) or 'gzip'
Optional:
filename (str:default:'N/A'): Used for debugging messages
Raises:
NotImplementedError if an unsupported codec is specified.
compression.EncodeError if the encoder has an issue
Return: decompressed content
"""
try:
encoding = (encoding or '').lower()
if encoding == '':
return content # depends on [control=['if'], data=[]]
elif encoding == 'gzip':
return gunzip(content) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except DecompressionError as err:
print('Filename: ' + str(filename))
raise # depends on [control=['except'], data=[]]
raise NotImplementedError(str(encoding) + ' is not currently supported. Supported Options: None, gzip') |
def del_current_vrf(self):
    """ Remove a VRF from the 'current_vrfs' session filter list.
    Reads the VRF id from the JSON request body, drops it from the
    session filter dict (saving the session only when something was
    actually removed) and returns the remaining filters as JSON.
    """
    vrf_id = int(request.json['vrf_id'])
    current = session['current_vrfs']
    if vrf_id in current:
        current.pop(vrf_id)
        session.save()
    return json.dumps(session.get('current_vrfs', {}))
constant[ Remove VRF to filter list session variable
]
variable[vrf_id] assign[=] call[name[int], parameter[call[name[request].json][constant[vrf_id]]]]
if compare[name[vrf_id] in call[name[session]][constant[current_vrfs]]] begin[:]
<ast.Delete object at 0x7da18eb56920>
call[name[session].save, parameter[]]
return[call[name[json].dumps, parameter[call[name[session].get, parameter[constant[current_vrfs], dictionary[[], []]]]]]] | keyword[def] identifier[del_current_vrf] ( identifier[self] ):
literal[string]
identifier[vrf_id] = identifier[int] ( identifier[request] . identifier[json] [ literal[string] ])
keyword[if] identifier[vrf_id] keyword[in] identifier[session] [ literal[string] ]:
keyword[del] identifier[session] [ literal[string] ][ identifier[vrf_id] ]
identifier[session] . identifier[save] ()
keyword[return] identifier[json] . identifier[dumps] ( identifier[session] . identifier[get] ( literal[string] ,{})) | def del_current_vrf(self):
""" Remove VRF to filter list session variable
"""
vrf_id = int(request.json['vrf_id'])
if vrf_id in session['current_vrfs']:
del session['current_vrfs'][vrf_id]
session.save() # depends on [control=['if'], data=['vrf_id']]
return json.dumps(session.get('current_vrfs', {})) |
def export(self):
    """
    See DiskExportManager.export
    Copies the disk to the destination, sparsifies it (unless the disk
    format is 'iso'), computes a sha1 checksum, writes Lago metadata and
    compresses the result. On any failure the partially written
    destination directory is removed by the rollback context.
    """
    with LogTask('Exporting disk {} to {}'.format(self.name, self.dst)):
        with utils.RollbackContext() as rollback:
            # If any step below raises, wipe the partially written dst.
            rollback.prependDefer(
                shutil.rmtree, self.dst, ignore_errors=True
            )
            self.copy()
            # 'iso' format disks are never sparsified.
            if self.disk['format'] != 'iso':
                self.sparse()
            self.calc_sha('sha1')
            self.update_lago_metadata()
            self.write_lago_metadata()
            self.compress()
            # Success: cancel the rmtree rollback so dst is kept.
            rollback.clear()
rollback.clear() | def function[export, parameter[self]]:
constant[
See DiskExportManager.export
]
with call[name[LogTask], parameter[call[constant[Exporting disk {} to {}].format, parameter[name[self].name, name[self].dst]]]] begin[:]
with call[name[utils].RollbackContext, parameter[]] begin[:]
call[name[rollback].prependDefer, parameter[name[shutil].rmtree, name[self].dst]]
call[name[self].copy, parameter[]]
if <ast.UnaryOp object at 0x7da1b0b30c10> begin[:]
call[name[self].sparse, parameter[]]
call[name[self].calc_sha, parameter[constant[sha1]]]
call[name[self].update_lago_metadata, parameter[]]
call[name[self].write_lago_metadata, parameter[]]
call[name[self].compress, parameter[]]
call[name[rollback].clear, parameter[]] | keyword[def] identifier[export] ( identifier[self] ):
literal[string]
keyword[with] identifier[LogTask] ( literal[string] . identifier[format] ( identifier[self] . identifier[name] , identifier[self] . identifier[dst] )):
keyword[with] identifier[utils] . identifier[RollbackContext] () keyword[as] identifier[rollback] :
identifier[rollback] . identifier[prependDefer] (
identifier[shutil] . identifier[rmtree] , identifier[self] . identifier[dst] , identifier[ignore_errors] = keyword[True]
)
identifier[self] . identifier[copy] ()
keyword[if] keyword[not] identifier[self] . identifier[disk] [ literal[string] ]== literal[string] :
identifier[self] . identifier[sparse] ()
identifier[self] . identifier[calc_sha] ( literal[string] )
identifier[self] . identifier[update_lago_metadata] ()
identifier[self] . identifier[write_lago_metadata] ()
identifier[self] . identifier[compress] ()
identifier[rollback] . identifier[clear] () | def export(self):
"""
See DiskExportManager.export
"""
with LogTask('Exporting disk {} to {}'.format(self.name, self.dst)):
with utils.RollbackContext() as rollback:
rollback.prependDefer(shutil.rmtree, self.dst, ignore_errors=True)
self.copy()
if not self.disk['format'] == 'iso':
self.sparse() # depends on [control=['if'], data=[]]
self.calc_sha('sha1')
self.update_lago_metadata()
self.write_lago_metadata()
self.compress()
rollback.clear() # depends on [control=['with'], data=['rollback']] # depends on [control=['with'], data=[]] |
def get_code(self):
    """
    Aggregate the final code for multi-checks.
    Returns:
        the worst-case code from all added results,
        or UNKNOWN if none were added
    """
    code = UNKNOWN
    for result in self._results:
        candidate = result.code
        # The first real code always replaces UNKNOWN; after that, keep
        # the highest (worst) code that is still below UNKNOWN.
        if code == UNKNOWN or code < candidate < UNKNOWN:
            code = candidate
    return code
constant[
the final code for multi-checks
arguments:
the worst-case code from all added results,
or UNKNOWN if none were added
]
variable[code] assign[=] name[UNKNOWN]
for taget[name[result]] in starred[name[self]._results] begin[:]
if <ast.BoolOp object at 0x7da1b0810a60> begin[:]
variable[code] assign[=] name[result].code
return[name[code]] | keyword[def] identifier[get_code] ( identifier[self] ):
literal[string]
identifier[code] = identifier[UNKNOWN]
keyword[for] identifier[result] keyword[in] identifier[self] . identifier[_results] :
keyword[if] identifier[code] == identifier[UNKNOWN] keyword[or] ( identifier[result] . identifier[code] < identifier[UNKNOWN]
keyword[and] identifier[result] . identifier[code] > identifier[code] ):
identifier[code] = identifier[result] . identifier[code]
keyword[return] identifier[code] | def get_code(self):
"""
the final code for multi-checks
arguments:
the worst-case code from all added results,
or UNKNOWN if none were added
"""
code = UNKNOWN
for result in self._results:
if code == UNKNOWN or (result.code < UNKNOWN and result.code > code):
code = result.code # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['result']]
return code |
def _translate_composite(teleport_value):
    """Translate a composite teleport value into a val subschema."""
    # Probe the three recognized composite kinds in a fixed order and
    # dispatch to the matching constructor for the first one present.
    for kind in ("Array", "Map", "Struct"):
        param = teleport_value.get(kind)
        if param is not None:
            return COMPOSITES[kind](param)
    raise DeserializationError(
        "Could not interpret %r as a teleport schema." % teleport_value)
constant[Translate a composite teleport value into a val subschema.]
for taget[name[key]] in starred[tuple[[<ast.Constant object at 0x7da1b1414430>, <ast.Constant object at 0x7da1b1416980>, <ast.Constant object at 0x7da1b14143a0>]]] begin[:]
variable[value] assign[=] call[name[teleport_value].get, parameter[name[key]]]
if compare[name[value] is constant[None]] begin[:]
continue
return[call[call[name[COMPOSITES]][name[key]], parameter[name[value]]]]
<ast.Raise object at 0x7da1b1417280> | keyword[def] identifier[_translate_composite] ( identifier[teleport_value] ):
literal[string]
keyword[for] identifier[key] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[value] = identifier[teleport_value] . identifier[get] ( identifier[key] )
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[continue]
keyword[return] identifier[COMPOSITES] [ identifier[key] ]( identifier[value] )
keyword[raise] identifier[DeserializationError] (
literal[string] % identifier[teleport_value] ) | def _translate_composite(teleport_value):
"""Translate a composite teleport value into a val subschema."""
for key in ('Array', 'Map', 'Struct'):
value = teleport_value.get(key)
if value is None:
continue # depends on [control=['if'], data=[]]
return COMPOSITES[key](value) # depends on [control=['for'], data=['key']]
raise DeserializationError('Could not interpret %r as a teleport schema.' % teleport_value) |
def upsert(self, key, value, entry):
    '''Update or insert an entry in the list of dictionaries.
    If a dictionary whose ``key`` equals ``value`` is found in the list,
    the FIRST matching list entry is replaced with ``entry``; otherwise
    ``entry`` is appended to the end of the list.
    The new entry is stored as-is and is not examined in any way: it is,
    in fact, possible to upsert an entry that does not match the
    supplied key/value pair.
    :param key:
        The dictionary key to examine.
    :param value:
        The value to search for as referenced by the key.
    :param entry:
        The replacement (or new) entry for the list.
    :returns:
        class
    '''
    index = internal.get_index(self.table, key, self.EQUAL, value)
    if index is not None:
        # Replace the first match in place.
        self.table[index] = entry
    else:
        # No match: record the new position and append.
        self.index_track.append(len(self.table))
        self.table.append(entry)
    return self
constant[Update or Insert an entry into the list of dictionaries.
If a dictionary in the list is found where key matches the value, then
the FIRST matching list entry is replaced with entry
else
the entry is appended to the end of the list.
The new entry is not examined in any way. It is, in fact, possible
to upsert an entry that does not match the supplied key/value.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 },
... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]},
... {"name": "Bill", "age": 19, "income": 29000 },
... ]
>>> entryA = {"name": "Willie", "age": 77}
>>> myPLOD = PLOD(test)
>>> print myPLOD.upsert("name", "Willie", entryA).returnString()
[
{age: 18, income: 93000, name: 'Jim' , wigs: 68},
{age: 18, income: None , name: 'Larry' , wigs: [3, 2, 9]},
{age: 20, income: 15000, name: 'Joe' , wigs: [1, 2, 3]},
{age: 19, income: 29000, name: 'Bill' , wigs: None },
{age: 77, income: None , name: 'Willie', wigs: None }
]
>>> entryB = {"name": "Joe", "age": 20, "income": 30, "wigs": [3, 2, 9]}
>>> print myPLOD.upsert("name", "Joe", entryB).returnString()
[
{age: 18, income: 93000, name: 'Jim' , wigs: 68},
{age: 18, income: None , name: 'Larry' , wigs: [3, 2, 9]},
{age: 20, income: 30, name: 'Joe' , wigs: [3, 2, 9]},
{age: 19, income: 29000, name: 'Bill' , wigs: None },
{age: 77, income: None , name: 'Willie', wigs: None }
]
:param key:
The dictionary key to examine.
:param value:
The value to search for as referenced by the key.
:param entry:
The replacement (or new) entry for the list.
:returns:
class
]
variable[index] assign[=] call[name[internal].get_index, parameter[name[self].table, name[key], name[self].EQUAL, name[value]]]
if compare[name[index] is constant[None]] begin[:]
call[name[self].index_track.append, parameter[call[name[len], parameter[name[self].table]]]]
call[name[self].table.append, parameter[name[entry]]]
return[name[self]] | keyword[def] identifier[upsert] ( identifier[self] , identifier[key] , identifier[value] , identifier[entry] ):
literal[string]
identifier[index] = identifier[internal] . identifier[get_index] ( identifier[self] . identifier[table] , identifier[key] , identifier[self] . identifier[EQUAL] , identifier[value] )
keyword[if] identifier[index] keyword[is] keyword[None] :
identifier[self] . identifier[index_track] . identifier[append] ( identifier[len] ( identifier[self] . identifier[table] ))
identifier[self] . identifier[table] . identifier[append] ( identifier[entry] )
keyword[else] :
identifier[self] . identifier[table] [ identifier[index] ]= identifier[entry]
keyword[return] identifier[self] | def upsert(self, key, value, entry):
"""Update or Insert an entry into the list of dictionaries.
If a dictionary in the list is found where key matches the value, then
the FIRST matching list entry is replaced with entry
else
the entry is appended to the end of the list.
The new entry is not examined in any way. It is, in fact, possible
to upsert an entry that does not match the supplied key/value.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 },
... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]},
... {"name": "Bill", "age": 19, "income": 29000 },
... ]
>>> entryA = {"name": "Willie", "age": 77}
>>> myPLOD = PLOD(test)
>>> print myPLOD.upsert("name", "Willie", entryA).returnString()
[
{age: 18, income: 93000, name: 'Jim' , wigs: 68},
{age: 18, income: None , name: 'Larry' , wigs: [3, 2, 9]},
{age: 20, income: 15000, name: 'Joe' , wigs: [1, 2, 3]},
{age: 19, income: 29000, name: 'Bill' , wigs: None },
{age: 77, income: None , name: 'Willie', wigs: None }
]
>>> entryB = {"name": "Joe", "age": 20, "income": 30, "wigs": [3, 2, 9]}
>>> print myPLOD.upsert("name", "Joe", entryB).returnString()
[
{age: 18, income: 93000, name: 'Jim' , wigs: 68},
{age: 18, income: None , name: 'Larry' , wigs: [3, 2, 9]},
{age: 20, income: 30, name: 'Joe' , wigs: [3, 2, 9]},
{age: 19, income: 29000, name: 'Bill' , wigs: None },
{age: 77, income: None , name: 'Willie', wigs: None }
]
:param key:
The dictionary key to examine.
:param value:
The value to search for as referenced by the key.
:param entry:
The replacement (or new) entry for the list.
:returns:
class
"""
index = internal.get_index(self.table, key, self.EQUAL, value)
if index is None:
self.index_track.append(len(self.table))
self.table.append(entry) # depends on [control=['if'], data=[]]
else:
self.table[index] = entry
return self |
def deserialize(datagram, source):
    """
    De-serialize a stream of bytes into a message.
    :param datagram: the incoming udp message
    :param source: the source address and port (ip, port)
    :return: the message, or defines.Codes.BAD_REQUEST.number when the
        datagram is malformed
    :rtype: Message
    """
    # NOTE(review): the layout parsed here (version/type/token-length
    # packed into the first byte, delta-encoded options, a payload
    # marker byte) looks like the CoAP wire format (RFC 7252) --
    # confirm against the rest of the module.
    try:
        # Fixed header: 1 byte version/type/token-length, 1 byte code,
        # 2 bytes message id, network byte order.
        fmt = "!BBH"
        pos = struct.calcsize(fmt)
        s = struct.Struct(fmt)
        values = s.unpack_from(datagram)
        first = values[0]
        code = values[1]
        mid = values[2]
        # Split the first byte: top 2 bits version, next 2 bits type,
        # low 4 bits token length.
        version = (first & 0xC0) >> 6
        message_type = (first & 0x30) >> 4
        token_length = (first & 0x0F)
        # The code decides the concrete message class to build.
        if Serializer.is_response(code):
            message = Response()
            message.code = code
        elif Serializer.is_request(code):
            message = Request()
            message.code = code
        else:
            message = Message()
        message.source = source
        message.destination = None
        message.version = version
        message.type = message_type
        message.mid = mid
        if token_length > 0:
            # Read token_length raw bytes immediately after the header.
            fmt = "%ss" % token_length
            s = struct.Struct(fmt)
            token_value = s.unpack_from(datagram[pos:])[0]
            message.token = token_value.decode("utf-8")
        else:
            message.token = None
        # Options and payload follow the token; re-base the offset on
        # the remainder of the datagram.
        pos += token_length
        current_option = 0
        values = datagram[pos:]
        length_packet = len(values)
        pos = 0
        while pos < length_packet:
            next_byte = struct.unpack("B", values[pos].to_bytes(1, "big"))[0]
            pos += 1
            if next_byte != int(defines.PAYLOAD_MARKER):
                # The first 4 bits of the byte hold the option delta and
                # the low 4 bits the option length (both may be extended
                # by additional bytes, handled by the helper below).
                num, option_length, pos = Serializer.read_option_value_len_from_byte(next_byte, pos, values)
                # Option numbers are delta-encoded relative to the
                # previous option.
                current_option += num
                # read option
                try:
                    option_item = defines.OptionRegistry.LIST[current_option]
                except KeyError:
                    (opt_critical, _, _) = defines.OptionRegistry.get_option_flags(current_option)
                    if opt_critical:
                        # Unknown critical options must abort parsing.
                        raise AttributeError("Critical option %s unknown" % current_option)
                    else:
                        # If the non-critical option is unknown
                        # (vendor-specific, proprietary) - just skip it
                        pass
                else:
                    if option_length == 0:
                        value = None
                    elif option_item.value_type == defines.INTEGER:
                        # Big-endian, variable-length unsigned integer.
                        tmp = values[pos: pos + option_length]
                        value = 0
                        for b in tmp:
                            value = (value << 8) | struct.unpack("B", b.to_bytes(1, "big"))[0]
                    elif option_item.value_type == defines.OPAQUE:
                        tmp = values[pos: pos + option_length]
                        value = tmp
                    else:
                        value = values[pos: pos + option_length]
                    option = Option()
                    option.number = current_option
                    option.value = Serializer.convert_to_raw(current_option, value, option_length)
                    message.add_option(option)
                    if option.number == defines.OptionRegistry.CONTENT_TYPE.number:
                        message.payload_type = option.value
                finally:
                    # Always skip past the option value, whether it was
                    # decoded or the option was unknown/non-critical.
                    pos += option_length
            else:
                # Payload marker seen: everything after it is payload.
                if length_packet <= pos:
                    # A payload marker followed by nothing is malformed.
                    raise AttributeError("Packet length %s, pos %s" % (length_packet, pos))
                message.payload = ""
                payload = values[pos:]
                try:
                    if message.payload_type == defines.Content_types["application/octet-stream"]:
                        message.payload = payload
                    else:
                        message.payload = payload.decode("utf-8")
                except AttributeError:
                    # No content-type option was seen (payload_type is
                    # unset); default to text.
                    message.payload = payload.decode("utf-8")
                pos += len(payload)
        return message
    except AttributeError:
        # Malformed packet (unknown critical option / empty payload).
        return defines.Codes.BAD_REQUEST.number
    except struct.error:
        # Datagram too short for the fixed header or the token.
        return defines.Codes.BAD_REQUEST.number
constant[
De-serialize a stream of byte to a message.
:param datagram: the incoming udp message
:param source: the source address and port (ip, port)
:return: the message
:rtype: Message
]
<ast.Try object at 0x7da204620550> | keyword[def] identifier[deserialize] ( identifier[datagram] , identifier[source] ):
literal[string]
keyword[try] :
identifier[fmt] = literal[string]
identifier[pos] = identifier[struct] . identifier[calcsize] ( identifier[fmt] )
identifier[s] = identifier[struct] . identifier[Struct] ( identifier[fmt] )
identifier[values] = identifier[s] . identifier[unpack_from] ( identifier[datagram] )
identifier[first] = identifier[values] [ literal[int] ]
identifier[code] = identifier[values] [ literal[int] ]
identifier[mid] = identifier[values] [ literal[int] ]
identifier[version] =( identifier[first] & literal[int] )>> literal[int]
identifier[message_type] =( identifier[first] & literal[int] )>> literal[int]
identifier[token_length] =( identifier[first] & literal[int] )
keyword[if] identifier[Serializer] . identifier[is_response] ( identifier[code] ):
identifier[message] = identifier[Response] ()
identifier[message] . identifier[code] = identifier[code]
keyword[elif] identifier[Serializer] . identifier[is_request] ( identifier[code] ):
identifier[message] = identifier[Request] ()
identifier[message] . identifier[code] = identifier[code]
keyword[else] :
identifier[message] = identifier[Message] ()
identifier[message] . identifier[source] = identifier[source]
identifier[message] . identifier[destination] = keyword[None]
identifier[message] . identifier[version] = identifier[version]
identifier[message] . identifier[type] = identifier[message_type]
identifier[message] . identifier[mid] = identifier[mid]
keyword[if] identifier[token_length] > literal[int] :
identifier[fmt] = literal[string] % identifier[token_length]
identifier[s] = identifier[struct] . identifier[Struct] ( identifier[fmt] )
identifier[token_value] = identifier[s] . identifier[unpack_from] ( identifier[datagram] [ identifier[pos] :])[ literal[int] ]
identifier[message] . identifier[token] = identifier[token_value] . identifier[decode] ( literal[string] )
keyword[else] :
identifier[message] . identifier[token] = keyword[None]
identifier[pos] += identifier[token_length]
identifier[current_option] = literal[int]
identifier[values] = identifier[datagram] [ identifier[pos] :]
identifier[length_packet] = identifier[len] ( identifier[values] )
identifier[pos] = literal[int]
keyword[while] identifier[pos] < identifier[length_packet] :
identifier[next_byte] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[values] [ identifier[pos] ]. identifier[to_bytes] ( literal[int] , literal[string] ))[ literal[int] ]
identifier[pos] += literal[int]
keyword[if] identifier[next_byte] != identifier[int] ( identifier[defines] . identifier[PAYLOAD_MARKER] ):
identifier[num] , identifier[option_length] , identifier[pos] = identifier[Serializer] . identifier[read_option_value_len_from_byte] ( identifier[next_byte] , identifier[pos] , identifier[values] )
identifier[current_option] += identifier[num]
keyword[try] :
identifier[option_item] = identifier[defines] . identifier[OptionRegistry] . identifier[LIST] [ identifier[current_option] ]
keyword[except] identifier[KeyError] :
( identifier[opt_critical] , identifier[_] , identifier[_] )= identifier[defines] . identifier[OptionRegistry] . identifier[get_option_flags] ( identifier[current_option] )
keyword[if] identifier[opt_critical] :
keyword[raise] identifier[AttributeError] ( literal[string] % identifier[current_option] )
keyword[else] :
keyword[pass]
keyword[else] :
keyword[if] identifier[option_length] == literal[int] :
identifier[value] = keyword[None]
keyword[elif] identifier[option_item] . identifier[value_type] == identifier[defines] . identifier[INTEGER] :
identifier[tmp] = identifier[values] [ identifier[pos] : identifier[pos] + identifier[option_length] ]
identifier[value] = literal[int]
keyword[for] identifier[b] keyword[in] identifier[tmp] :
identifier[value] =( identifier[value] << literal[int] )| identifier[struct] . identifier[unpack] ( literal[string] , identifier[b] . identifier[to_bytes] ( literal[int] , literal[string] ))[ literal[int] ]
keyword[elif] identifier[option_item] . identifier[value_type] == identifier[defines] . identifier[OPAQUE] :
identifier[tmp] = identifier[values] [ identifier[pos] : identifier[pos] + identifier[option_length] ]
identifier[value] = identifier[tmp]
keyword[else] :
identifier[value] = identifier[values] [ identifier[pos] : identifier[pos] + identifier[option_length] ]
identifier[option] = identifier[Option] ()
identifier[option] . identifier[number] = identifier[current_option]
identifier[option] . identifier[value] = identifier[Serializer] . identifier[convert_to_raw] ( identifier[current_option] , identifier[value] , identifier[option_length] )
identifier[message] . identifier[add_option] ( identifier[option] )
keyword[if] identifier[option] . identifier[number] == identifier[defines] . identifier[OptionRegistry] . identifier[CONTENT_TYPE] . identifier[number] :
identifier[message] . identifier[payload_type] = identifier[option] . identifier[value]
keyword[finally] :
identifier[pos] += identifier[option_length]
keyword[else] :
keyword[if] identifier[length_packet] <= identifier[pos] :
keyword[raise] identifier[AttributeError] ( literal[string] %( identifier[length_packet] , identifier[pos] ))
identifier[message] . identifier[payload] = literal[string]
identifier[payload] = identifier[values] [ identifier[pos] :]
keyword[try] :
keyword[if] identifier[message] . identifier[payload_type] == identifier[defines] . identifier[Content_types] [ literal[string] ]:
identifier[message] . identifier[payload] = identifier[payload]
keyword[else] :
identifier[message] . identifier[payload] = identifier[payload] . identifier[decode] ( literal[string] )
keyword[except] identifier[AttributeError] :
identifier[message] . identifier[payload] = identifier[payload] . identifier[decode] ( literal[string] )
identifier[pos] += identifier[len] ( identifier[payload] )
keyword[return] identifier[message]
keyword[except] identifier[AttributeError] :
keyword[return] identifier[defines] . identifier[Codes] . identifier[BAD_REQUEST] . identifier[number]
keyword[except] identifier[struct] . identifier[error] :
keyword[return] identifier[defines] . identifier[Codes] . identifier[BAD_REQUEST] . identifier[number] | def deserialize(datagram, source):
"""
De-serialize a stream of byte to a message.
:param datagram: the incoming udp message
:param source: the source address and port (ip, port)
:return: the message
:rtype: Message
"""
try:
fmt = '!BBH'
pos = struct.calcsize(fmt)
s = struct.Struct(fmt)
values = s.unpack_from(datagram)
first = values[0]
code = values[1]
mid = values[2]
version = (first & 192) >> 6
message_type = (first & 48) >> 4
token_length = first & 15
if Serializer.is_response(code):
message = Response()
message.code = code # depends on [control=['if'], data=[]]
elif Serializer.is_request(code):
message = Request()
message.code = code # depends on [control=['if'], data=[]]
else:
message = Message()
message.source = source
message.destination = None
message.version = version
message.type = message_type
message.mid = mid
if token_length > 0:
fmt = '%ss' % token_length
s = struct.Struct(fmt)
token_value = s.unpack_from(datagram[pos:])[0]
message.token = token_value.decode('utf-8') # depends on [control=['if'], data=['token_length']]
else:
message.token = None
pos += token_length
current_option = 0
values = datagram[pos:]
length_packet = len(values)
pos = 0
while pos < length_packet:
next_byte = struct.unpack('B', values[pos].to_bytes(1, 'big'))[0]
pos += 1
if next_byte != int(defines.PAYLOAD_MARKER):
# the first 4 bits of the byte represent the option delta
# delta = self._reader.read(4).uint
(num, option_length, pos) = Serializer.read_option_value_len_from_byte(next_byte, pos, values)
current_option += num
# read option
try:
option_item = defines.OptionRegistry.LIST[current_option] # depends on [control=['try'], data=[]]
except KeyError:
(opt_critical, _, _) = defines.OptionRegistry.get_option_flags(current_option)
if opt_critical:
raise AttributeError('Critical option %s unknown' % current_option) # depends on [control=['if'], data=[]]
else:
# If the non-critical option is unknown
# (vendor-specific, proprietary) - just skip it
#log.err("unrecognized option %d" % current_option)
pass # depends on [control=['except'], data=[]]
else:
if option_length == 0:
value = None # depends on [control=['if'], data=[]]
elif option_item.value_type == defines.INTEGER:
tmp = values[pos:pos + option_length]
value = 0
for b in tmp:
value = value << 8 | struct.unpack('B', b.to_bytes(1, 'big'))[0] # depends on [control=['for'], data=['b']] # depends on [control=['if'], data=[]]
elif option_item.value_type == defines.OPAQUE:
tmp = values[pos:pos + option_length]
value = tmp # depends on [control=['if'], data=[]]
else:
value = values[pos:pos + option_length]
option = Option()
option.number = current_option
option.value = Serializer.convert_to_raw(current_option, value, option_length)
message.add_option(option)
if option.number == defines.OptionRegistry.CONTENT_TYPE.number:
message.payload_type = option.value # depends on [control=['if'], data=[]]
finally:
pos += option_length # depends on [control=['if'], data=['next_byte']]
else:
if length_packet <= pos:
# log.err("Payload Marker with no payload")
raise AttributeError('Packet length %s, pos %s' % (length_packet, pos)) # depends on [control=['if'], data=['length_packet', 'pos']]
message.payload = ''
payload = values[pos:]
try:
if message.payload_type == defines.Content_types['application/octet-stream']:
message.payload = payload # depends on [control=['if'], data=[]]
else:
message.payload = payload.decode('utf-8') # depends on [control=['try'], data=[]]
except AttributeError:
message.payload = payload.decode('utf-8') # depends on [control=['except'], data=[]]
pos += len(payload) # depends on [control=['while'], data=['pos', 'length_packet']]
return message # depends on [control=['try'], data=[]]
except AttributeError:
return defines.Codes.BAD_REQUEST.number # depends on [control=['except'], data=[]]
except struct.error:
return defines.Codes.BAD_REQUEST.number # depends on [control=['except'], data=[]] |
def _decode_response(response):
    """Strip off Gerrit's magic prefix and decode a response.
    :returns:
        Decoded JSON content as a dict, or raw text if content could not be
        decoded as JSON.
    :raises:
        requests.HTTPError if the response contains an HTTP error status code.
    """
    mime = response.headers.get('content-type', '')
    logger.debug("status[%s] content_type[%s] encoding[%s]" %
                 (response.status_code, mime, response.encoding))
    response.raise_for_status()
    body = response.content.strip()
    if response.encoding:
        body = body.decode(response.encoding)
    # An empty body is returned as-is, regardless of content type.
    if not body:
        logger.debug("no content in response")
        return body
    # Only the media type (before any ';charset=...' parameter) matters.
    if mime.partition(';')[0] != 'application/json':
        return body
    if body.startswith(GERRIT_MAGIC_JSON_PREFIX):
        body = body[len(GERRIT_MAGIC_JSON_PREFIX):]
    try:
        return json.loads(body)
    except ValueError:
        logger.error('Invalid json content: %s', body)
        raise
constant[Strip off Gerrit's magic prefix and decode a response.
:returns:
Decoded JSON content as a dict, or raw text if content could not be
decoded as JSON.
:raises:
requests.HTTPError if the response contains an HTTP error status code.
]
variable[content_type] assign[=] call[name[response].headers.get, parameter[constant[content-type], constant[]]]
call[name[logger].debug, parameter[binary_operation[constant[status[%s] content_type[%s] encoding[%s]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18c4ceb90>, <ast.Name object at 0x7da18c4ce080>, <ast.Attribute object at 0x7da18c4cfa60>]]]]]
call[name[response].raise_for_status, parameter[]]
variable[content] assign[=] call[name[response].content.strip, parameter[]]
if name[response].encoding begin[:]
variable[content] assign[=] call[name[content].decode, parameter[name[response].encoding]]
if <ast.UnaryOp object at 0x7da1b1a5db40> begin[:]
call[name[logger].debug, parameter[constant[no content in response]]]
return[name[content]]
if compare[call[call[name[content_type].split, parameter[constant[;]]]][constant[0]] not_equal[!=] constant[application/json]] begin[:]
return[name[content]]
if call[name[content].startswith, parameter[name[GERRIT_MAGIC_JSON_PREFIX]]] begin[:]
variable[content] assign[=] call[name[content]][<ast.Slice object at 0x7da1b1a5db10>]
<ast.Try object at 0x7da1b1a5f160> | keyword[def] identifier[_decode_response] ( identifier[response] ):
literal[string]
identifier[content_type] = identifier[response] . identifier[headers] . identifier[get] ( literal[string] , literal[string] )
identifier[logger] . identifier[debug] ( literal[string] %
( identifier[response] . identifier[status_code] , identifier[content_type] , identifier[response] . identifier[encoding] ))
identifier[response] . identifier[raise_for_status] ()
identifier[content] = identifier[response] . identifier[content] . identifier[strip] ()
keyword[if] identifier[response] . identifier[encoding] :
identifier[content] = identifier[content] . identifier[decode] ( identifier[response] . identifier[encoding] )
keyword[if] keyword[not] identifier[content] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[return] identifier[content]
keyword[if] identifier[content_type] . identifier[split] ( literal[string] )[ literal[int] ]!= literal[string] :
keyword[return] identifier[content]
keyword[if] identifier[content] . identifier[startswith] ( identifier[GERRIT_MAGIC_JSON_PREFIX] ):
identifier[content] = identifier[content] [ identifier[len] ( identifier[GERRIT_MAGIC_JSON_PREFIX] ):]
keyword[try] :
keyword[return] identifier[json] . identifier[loads] ( identifier[content] )
keyword[except] identifier[ValueError] :
identifier[logger] . identifier[error] ( literal[string] , identifier[content] )
keyword[raise] | def _decode_response(response):
"""Strip off Gerrit's magic prefix and decode a response.
:returns:
Decoded JSON content as a dict, or raw text if content could not be
decoded as JSON.
:raises:
requests.HTTPError if the response contains an HTTP error status code.
"""
content_type = response.headers.get('content-type', '')
logger.debug('status[%s] content_type[%s] encoding[%s]' % (response.status_code, content_type, response.encoding))
response.raise_for_status()
content = response.content.strip()
if response.encoding:
content = content.decode(response.encoding) # depends on [control=['if'], data=[]]
if not content:
logger.debug('no content in response')
return content # depends on [control=['if'], data=[]]
if content_type.split(';')[0] != 'application/json':
return content # depends on [control=['if'], data=[]]
if content.startswith(GERRIT_MAGIC_JSON_PREFIX):
content = content[len(GERRIT_MAGIC_JSON_PREFIX):] # depends on [control=['if'], data=[]]
try:
return json.loads(content) # depends on [control=['try'], data=[]]
except ValueError:
logger.error('Invalid json content: %s', content)
raise # depends on [control=['except'], data=[]] |
def format_step(step, zero_prefix=False):
    """Render *step* for display.

    An int becomes a plain (or zero-padded, width-6) number; a tuple of
    (epoch, step) becomes ``epoch:step`` (widths 4 and 6 when padded).
    Any other type yields None, matching the original contract.
    """
    if isinstance(step, int):
        template = "{:06}" if zero_prefix else "{}"
        return template.format(step)
    if isinstance(step, tuple):
        template = "{:04}:{:06}" if zero_prefix else "{}:{}"
        return template.format(*step)
constant[Return the step value in format suitable for display.]
if call[name[isinstance], parameter[name[step], name[int]]] begin[:]
return[<ast.IfExp object at 0x7da1b216d210>] | keyword[def] identifier[format_step] ( identifier[step] , identifier[zero_prefix] = keyword[False] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[step] , identifier[int] ):
keyword[return] literal[string] . identifier[format] ( identifier[step] ) keyword[if] identifier[zero_prefix] keyword[else] literal[string] . identifier[format] ( identifier[step] )
keyword[elif] identifier[isinstance] ( identifier[step] , identifier[tuple] ):
keyword[return] literal[string] . identifier[format] (* identifier[step] ) keyword[if] identifier[zero_prefix] keyword[else] literal[string] . identifier[format] (* identifier[step] ) | def format_step(step, zero_prefix=False):
"""Return the step value in format suitable for display."""
if isinstance(step, int):
return '{:06}'.format(step) if zero_prefix else '{}'.format(step) # depends on [control=['if'], data=[]]
elif isinstance(step, tuple):
return '{:04}:{:06}'.format(*step) if zero_prefix else '{}:{}'.format(*step) # depends on [control=['if'], data=[]] |
def _convert_localized_value(value: LocalizedValue) -> LocalizedIntegerValue:
    """Converts from :see:LocalizedValue to :see:LocalizedIntegerValue.

    For every configured language the raw value is parsed as an int;
    missing, blank, or unparseable values map to None.
    """
    converted = {}
    for lang_code, _name in settings.LANGUAGES:
        raw = value.get(lang_code, None)
        if raw is not None and raw.strip() != '':
            try:
                converted[lang_code] = int(raw)
                continue
            except (ValueError, TypeError):
                pass
        converted[lang_code] = None
    return LocalizedIntegerValue(converted)
constant[Converts from :see:LocalizedValue to :see:LocalizedIntegerValue.]
variable[integer_values] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b084d390>, <ast.Name object at 0x7da1b084e500>]]] in starred[name[settings].LANGUAGES] begin[:]
variable[local_value] assign[=] call[name[value].get, parameter[name[lang_code], constant[None]]]
if <ast.BoolOp object at 0x7da1b084d660> begin[:]
variable[local_value] assign[=] constant[None]
<ast.Try object at 0x7da1b088cf70>
return[call[name[LocalizedIntegerValue], parameter[name[integer_values]]]] | keyword[def] identifier[_convert_localized_value] ( identifier[value] : identifier[LocalizedValue] )-> identifier[LocalizedIntegerValue] :
literal[string]
identifier[integer_values] ={}
keyword[for] identifier[lang_code] , identifier[_] keyword[in] identifier[settings] . identifier[LANGUAGES] :
identifier[local_value] = identifier[value] . identifier[get] ( identifier[lang_code] , keyword[None] )
keyword[if] identifier[local_value] keyword[is] keyword[None] keyword[or] identifier[local_value] . identifier[strip] ()== literal[string] :
identifier[local_value] = keyword[None]
keyword[try] :
identifier[integer_values] [ identifier[lang_code] ]= identifier[int] ( identifier[local_value] )
keyword[except] ( identifier[ValueError] , identifier[TypeError] ):
identifier[integer_values] [ identifier[lang_code] ]= keyword[None]
keyword[return] identifier[LocalizedIntegerValue] ( identifier[integer_values] ) | def _convert_localized_value(value: LocalizedValue) -> LocalizedIntegerValue:
"""Converts from :see:LocalizedValue to :see:LocalizedIntegerValue."""
integer_values = {}
for (lang_code, _) in settings.LANGUAGES:
local_value = value.get(lang_code, None)
if local_value is None or local_value.strip() == '':
local_value = None # depends on [control=['if'], data=[]]
try:
integer_values[lang_code] = int(local_value) # depends on [control=['try'], data=[]]
except (ValueError, TypeError):
integer_values[lang_code] = None # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
return LocalizedIntegerValue(integer_values) |
def close(self, *args, **kwargs):
    """
    Engine closed, copy file to DB if it has changed.

    Computes the MD5 of the local SQLite file and, if it differs from the
    hash recorded at load time (``self.db_hash``), uploads the file to the
    configured S3 bucket. Upload failures are logged but not raised, so
    closing the connection remains best-effort, matching the original
    behavior.
    """
    super(DatabaseWrapper, self).close(*args, **kwargs)
    signature_version = self.settings_dict.get("SIGNATURE_VERSION", "s3v4")
    s3 = boto3.resource(
        's3',
        config=botocore.client.Config(signature_version=signature_version),
    )
    try:
        with open(self.settings_dict['NAME'], 'rb') as f:
            fb = f.read()
        if self.db_hash == hashlib.md5(fb).hexdigest():
            logging.debug("Database unchanged, not saving to remote DB!")
            return
        bytes_io = BytesIO(fb)
        s3_object = s3.Object(self.settings_dict['BUCKET'], self.settings_dict['REMOTE_NAME'])
        # BUG FIX: boto3 resource actions accept keyword arguments only;
        # the original put('rb', Body=...) raised TypeError, which the
        # broad except silently swallowed, so nothing was ever uploaded.
        s3_object.put(Body=bytes_io)
        # Only report success when the upload actually happened (the
        # original logged "Saved" even after a swallowed exception).
        logging.debug("Saved to remote DB!")
    except Exception:
        # Keep close() best-effort, but record the full traceback instead
        # of hiding the failure at debug level.
        logging.exception("Failed to save database to remote S3 bucket")
constant[
Engine closed, copy file to DB if it has changed
]
call[call[name[super], parameter[name[DatabaseWrapper], name[self]]].close, parameter[<ast.Starred object at 0x7da1b064ed70>]]
variable[signature_version] assign[=] call[name[self].settings_dict.get, parameter[constant[SIGNATURE_VERSION], constant[s3v4]]]
variable[s3] assign[=] call[name[boto3].resource, parameter[constant[s3]]]
<ast.Try object at 0x7da20c9905b0>
call[name[logging].debug, parameter[constant[Saved to remote DB!]]] | keyword[def] identifier[close] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[super] ( identifier[DatabaseWrapper] , identifier[self] ). identifier[close] (* identifier[args] ,** identifier[kwargs] )
identifier[signature_version] = identifier[self] . identifier[settings_dict] . identifier[get] ( literal[string] , literal[string] )
identifier[s3] = identifier[boto3] . identifier[resource] (
literal[string] ,
identifier[config] = identifier[botocore] . identifier[client] . identifier[Config] ( identifier[signature_version] = identifier[signature_version] ),
)
keyword[try] :
keyword[with] identifier[open] ( identifier[self] . identifier[settings_dict] [ literal[string] ], literal[string] ) keyword[as] identifier[f] :
identifier[fb] = identifier[f] . identifier[read] ()
identifier[m] = identifier[hashlib] . identifier[md5] ()
identifier[m] . identifier[update] ( identifier[fb] )
keyword[if] identifier[self] . identifier[db_hash] == identifier[m] . identifier[hexdigest] ():
identifier[logging] . identifier[debug] ( literal[string] )
keyword[return]
identifier[bytesIO] = identifier[BytesIO] ()
identifier[bytesIO] . identifier[write] ( identifier[fb] )
identifier[bytesIO] . identifier[seek] ( literal[int] )
identifier[s3_object] = identifier[s3] . identifier[Object] ( identifier[self] . identifier[settings_dict] [ literal[string] ], identifier[self] . identifier[settings_dict] [ literal[string] ])
identifier[result] = identifier[s3_object] . identifier[put] ( literal[string] , identifier[Body] = identifier[bytesIO] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logging] . identifier[debug] ( identifier[e] )
identifier[logging] . identifier[debug] ( literal[string] ) | def close(self, *args, **kwargs):
"""
Engine closed, copy file to DB if it has changed
"""
super(DatabaseWrapper, self).close(*args, **kwargs)
signature_version = self.settings_dict.get('SIGNATURE_VERSION', 's3v4')
s3 = boto3.resource('s3', config=botocore.client.Config(signature_version=signature_version))
try:
with open(self.settings_dict['NAME'], 'rb') as f:
fb = f.read()
m = hashlib.md5()
m.update(fb)
if self.db_hash == m.hexdigest():
logging.debug('Database unchanged, not saving to remote DB!')
return # depends on [control=['if'], data=[]]
bytesIO = BytesIO()
bytesIO.write(fb)
bytesIO.seek(0)
s3_object = s3.Object(self.settings_dict['BUCKET'], self.settings_dict['REMOTE_NAME'])
result = s3_object.put('rb', Body=bytesIO) # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except Exception as e:
logging.debug(e) # depends on [control=['except'], data=['e']]
logging.debug('Saved to remote DB!') |
def right_corner_label(self, label, position=None, rotation=0, offset=0.08,
                       **kwargs):
    """
    Sets the label on the right corner (complements left axis).

    Parameters
    ----------
    label: String
        The axis label
    position: 3-Tuple of floats, None
        The position of the text label; when falsy, a default derived
        from *offset* is used.
    rotation: float, 0
        The angle of rotation of the label
    offset: float,
        Used to compute the distance of the label from the axis
    kwargs:
        Any kwargs to pass through to matplotlib.
    """
    default_position = (1, offset / 2, 0)
    self._corner_labels["right"] = (
        label,
        position if position else default_position,
        rotation,
        kwargs,
    )
constant[
Sets the label on the right corner (complements left axis).
Parameters
----------
label: String
The axis label
position: 3-Tuple of floats, None
The position of the text label
rotation: float, 0
The angle of rotation of the label
offset: float,
Used to compute the distance of the label from the axis
kwargs:
Any kwargs to pass through to matplotlib.
]
if <ast.UnaryOp object at 0x7da18bc72aa0> begin[:]
variable[position] assign[=] tuple[[<ast.Constant object at 0x7da18bc73a30>, <ast.BinOp object at 0x7da207f00d90>, <ast.Constant object at 0x7da207f024d0>]]
call[name[self]._corner_labels][constant[right]] assign[=] tuple[[<ast.Name object at 0x7da207f01c00>, <ast.Name object at 0x7da207f005b0>, <ast.Name object at 0x7da207f00bb0>, <ast.Name object at 0x7da207f03ca0>]] | keyword[def] identifier[right_corner_label] ( identifier[self] , identifier[label] , identifier[position] = keyword[None] , identifier[rotation] = literal[int] , identifier[offset] = literal[int] ,
** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[position] :
identifier[position] =( literal[int] , identifier[offset] / literal[int] , literal[int] )
identifier[self] . identifier[_corner_labels] [ literal[string] ]=( identifier[label] , identifier[position] , identifier[rotation] , identifier[kwargs] ) | def right_corner_label(self, label, position=None, rotation=0, offset=0.08, **kwargs):
"""
Sets the label on the right corner (complements left axis).
Parameters
----------
label: String
The axis label
position: 3-Tuple of floats, None
The position of the text label
rotation: float, 0
The angle of rotation of the label
offset: float,
Used to compute the distance of the label from the axis
kwargs:
Any kwargs to pass through to matplotlib.
"""
if not position:
position = (1, offset / 2, 0) # depends on [control=['if'], data=[]]
self._corner_labels['right'] = (label, position, rotation, kwargs) |
def sort_by_distance(cls, consumer_offsets_metadata):
    """Receives a dict of (topic_name: ConsumerPartitionOffset) and returns a
    similar dict where the topics are sorted by total offset distance."""
    def total_distance(entry):
        # Sum of (highmark - current) across all partitions of the topic.
        _topic, offsets = entry
        return sum(offset.highmark - offset.current for offset in offsets)

    ordered = sorted(consumer_offsets_metadata.items(), key=total_distance)
    return OrderedDict(ordered)
constant[Receives a dict of (topic_name: ConsumerPartitionOffset) and returns a
similar dict where the topics are sorted by total offset distance.]
variable[sorted_offsets] assign[=] call[name[sorted], parameter[call[name[list], parameter[call[name[consumer_offsets_metadata].items, parameter[]]]]]]
return[call[name[OrderedDict], parameter[name[sorted_offsets]]]] | keyword[def] identifier[sort_by_distance] ( identifier[cls] , identifier[consumer_offsets_metadata] ):
literal[string]
identifier[sorted_offsets] = identifier[sorted] (
identifier[list] ( identifier[consumer_offsets_metadata] . identifier[items] ()),
identifier[key] = keyword[lambda] identifier[topic_offsets] : identifier[sum] ([ identifier[o] . identifier[highmark] - identifier[o] . identifier[current] keyword[for] identifier[o] keyword[in] identifier[topic_offsets] [ literal[int] ]])
)
keyword[return] identifier[OrderedDict] ( identifier[sorted_offsets] ) | def sort_by_distance(cls, consumer_offsets_metadata):
"""Receives a dict of (topic_name: ConsumerPartitionOffset) and returns a
similar dict where the topics are sorted by total offset distance."""
sorted_offsets = sorted(list(consumer_offsets_metadata.items()), key=lambda topic_offsets: sum([o.highmark - o.current for o in topic_offsets[1]]))
return OrderedDict(sorted_offsets) |
def HTMLHelpWorkshop(self):
    """
    Microsoft HTML Help Workshop

    Returns the install path only for Visual C++ 11.0 and later; older
    toolchains get an empty list.
    """
    if self.vc_ver >= 11.0:
        return [os.path.join(self.si.ProgramFilesx86, 'HTML Help Workshop')]
    return []
constant[
Microsoft HTML Help Workshop
]
if compare[name[self].vc_ver less[<] constant[11.0]] begin[:]
return[list[[]]]
return[list[[<ast.Call object at 0x7da1b1bc06a0>]]] | keyword[def] identifier[HTMLHelpWorkshop] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[vc_ver] < literal[int] :
keyword[return] []
keyword[return] [ identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[si] . identifier[ProgramFilesx86] , literal[string] )] | def HTMLHelpWorkshop(self):
"""
Microsoft HTML Help Workshop
"""
if self.vc_ver < 11.0:
return [] # depends on [control=['if'], data=[]]
return [os.path.join(self.si.ProgramFilesx86, 'HTML Help Workshop')] |
def writelines(self, sequence):
    """Write a sequence of strings to the file.

    Does not add separators. Each write is chained onto the previous
    one via its Deferred, so the strings are written strictly in order.
    """
    pending = iter(sequence)

    def write_next(_result=None):
        try:
            chunk = next(pending)
        except StopIteration:
            # Sequence exhausted -- resolve the chain with None.
            return None
        return self.write(chunk).addCallback(write_next)

    return defer.maybeDeferred(write_next)
constant[Write a sequence of strings to the file.
Does not add separators.
]
variable[iterator] assign[=] call[name[iter], parameter[name[sequence]]]
def function[iterate, parameter[_]]:
<ast.Try object at 0x7da18f09c280>
return[call[name[defer].maybeDeferred, parameter[name[iterate]]]] | keyword[def] identifier[writelines] ( identifier[self] , identifier[sequence] ):
literal[string]
identifier[iterator] = identifier[iter] ( identifier[sequence] )
keyword[def] identifier[iterate] ( identifier[_] = keyword[None] ):
keyword[try] :
keyword[return] identifier[self] . identifier[write] ( identifier[next] ( identifier[iterator] )). identifier[addCallback] ( identifier[iterate] )
keyword[except] identifier[StopIteration] :
keyword[return]
keyword[return] identifier[defer] . identifier[maybeDeferred] ( identifier[iterate] ) | def writelines(self, sequence):
"""Write a sequence of strings to the file.
Does not add separators.
"""
iterator = iter(sequence)
def iterate(_=None):
try:
return self.write(next(iterator)).addCallback(iterate) # depends on [control=['try'], data=[]]
except StopIteration:
return # depends on [control=['except'], data=[]]
return defer.maybeDeferred(iterate) |
def tweetqueue(ctx, dry_run, config):
    """A command line tool for time-delaying your tweets."""
    ctx.obj = {}
    ctx.obj['DRYRUN'] = dry_run
    # If the subcommand is "config", bypass all setup code
    if ctx.invoked_subcommand == 'config':
        return
    # If the config file wasn't provided, attempt to load the default one.
    if config is None:
        user_home = os.path.expanduser("~")
        default_config = os.path.join(user_home, ".tweetqueue")
        if not os.path.isfile(default_config):
            click.echo("Default configuration was not found and none was provided.")
            click.echo("Run 'tweetqueue config' to create one.")
            ctx.exit(1)
        config = open(default_config, 'rb')
    try:
        ctx.obj['config'] = json.load(config)
    except Exception as e:
        click.echo("Unable to read configuration file.")
        click.echo("Are you sure it is valid JSON")
        # BUG FIX: Exception.message does not exist on Python 3, so the
        # original line raised AttributeError here instead of reporting
        # the JSON error. str(e) works on both Python 2 and 3.
        click.echo("JSON Error: " + str(e))
        ctx.exit(1)
    finally:
        # Close the config handle whether parsing succeeded or not; the
        # original leaked it. Harmless if the caller manages the file.
        config.close()
    # Verify that the config file has all required options
    required_config = [
        'API_KEY',
        'API_SECRET',
        'ACCESS_TOKEN',
        'ACCESS_TOKEN_SECRET',
        'DATABASE_LOCATION',
    ]
    if not all(key in ctx.obj['config'] for key in required_config):
        click.echo("Missing required value in config file.")
        ctx.exit(1)
    # Make a tweepy api object for the context
    auth = tweepy.OAuthHandler(
        ctx.obj['config']['API_KEY'],
        ctx.obj['config']['API_SECRET'],
    )
    auth.set_access_token(
        ctx.obj['config']['ACCESS_TOKEN'],
        ctx.obj['config']['ACCESS_TOKEN_SECRET'],
    )
    ctx.obj['TWEEPY_API'] = tweepy.API(auth)
    ctx.obj['TWEETLIST'] = TweetList(ctx.obj['config']['DATABASE_LOCATION'])
ctx.obj['TWEETLIST'] = TweetList(ctx.obj['config']['DATABASE_LOCATION']) | def function[tweetqueue, parameter[ctx, dry_run, config]]:
constant[A command line tool for time-delaying your tweets.]
name[ctx].obj assign[=] dictionary[[], []]
call[name[ctx].obj][constant[DRYRUN]] assign[=] name[dry_run]
if compare[name[ctx].invoked_subcommand equal[==] constant[config]] begin[:]
return[None]
if compare[name[config] is constant[None]] begin[:]
variable[user_home] assign[=] call[name[os].path.expanduser, parameter[constant[~]]]
variable[default_config] assign[=] call[name[os].path.join, parameter[name[user_home], constant[.tweetqueue]]]
if <ast.UnaryOp object at 0x7da18eb54970> begin[:]
call[name[click].echo, parameter[constant[Default configuration was not found and none was provided.]]]
call[name[click].echo, parameter[constant[Run 'tweetqueue config' to create one.]]]
call[name[ctx].exit, parameter[constant[1]]]
variable[config] assign[=] call[name[open], parameter[name[default_config], constant[rb]]]
<ast.Try object at 0x7da1b1455780>
variable[required_config] assign[=] list[[<ast.Constant object at 0x7da1b1454310>, <ast.Constant object at 0x7da1b1456080>, <ast.Constant object at 0x7da1b1455120>, <ast.Constant object at 0x7da1b1455720>, <ast.Constant object at 0x7da1b1455690>]]
if <ast.UnaryOp object at 0x7da1b1455180> begin[:]
call[name[click].echo, parameter[constant[Missing required value in config file.]]]
call[name[ctx].exit, parameter[constant[1]]]
variable[auth] assign[=] call[name[tweepy].OAuthHandler, parameter[call[call[name[ctx].obj][constant[config]]][constant[API_KEY]], call[call[name[ctx].obj][constant[config]]][constant[API_SECRET]]]]
call[name[auth].set_access_token, parameter[call[call[name[ctx].obj][constant[config]]][constant[ACCESS_TOKEN]], call[call[name[ctx].obj][constant[config]]][constant[ACCESS_TOKEN_SECRET]]]]
call[name[ctx].obj][constant[TWEEPY_API]] assign[=] call[name[tweepy].API, parameter[name[auth]]]
call[name[ctx].obj][constant[TWEETLIST]] assign[=] call[name[TweetList], parameter[call[call[name[ctx].obj][constant[config]]][constant[DATABASE_LOCATION]]]] | keyword[def] identifier[tweetqueue] ( identifier[ctx] , identifier[dry_run] , identifier[config] ):
literal[string]
identifier[ctx] . identifier[obj] ={}
identifier[ctx] . identifier[obj] [ literal[string] ]= identifier[dry_run]
keyword[if] identifier[ctx] . identifier[invoked_subcommand] == literal[string] :
keyword[return]
keyword[if] identifier[config] keyword[is] keyword[None] :
identifier[user_home] = identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] )
identifier[default_config] = identifier[os] . identifier[path] . identifier[join] ( identifier[user_home] , literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[default_config] ):
identifier[click] . identifier[echo] ( literal[string] )
identifier[click] . identifier[echo] ( literal[string] )
identifier[ctx] . identifier[exit] ( literal[int] )
identifier[config] = identifier[open] ( identifier[default_config] , literal[string] )
keyword[try] :
identifier[ctx] . identifier[obj] [ literal[string] ]= identifier[json] . identifier[load] ( identifier[config] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[click] . identifier[echo] ( literal[string] )
identifier[click] . identifier[echo] ( literal[string] )
identifier[click] . identifier[echo] ( literal[string] + identifier[e] . identifier[message] )
identifier[ctx] . identifier[exit] ( literal[int] )
identifier[required_config] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string]
]
keyword[if] keyword[not] identifier[all] ( identifier[key] keyword[in] identifier[ctx] . identifier[obj] [ literal[string] ] keyword[for] identifier[key] keyword[in] identifier[required_config] ):
identifier[click] . identifier[echo] ( literal[string] )
identifier[ctx] . identifier[exit] ( literal[int] )
identifier[auth] = identifier[tweepy] . identifier[OAuthHandler] (
identifier[ctx] . identifier[obj] [ literal[string] ][ literal[string] ],
identifier[ctx] . identifier[obj] [ literal[string] ][ literal[string] ]
)
identifier[auth] . identifier[set_access_token] (
identifier[ctx] . identifier[obj] [ literal[string] ][ literal[string] ],
identifier[ctx] . identifier[obj] [ literal[string] ][ literal[string] ]
)
identifier[ctx] . identifier[obj] [ literal[string] ]= identifier[tweepy] . identifier[API] ( identifier[auth] )
identifier[ctx] . identifier[obj] [ literal[string] ]= identifier[TweetList] ( identifier[ctx] . identifier[obj] [ literal[string] ][ literal[string] ]) | def tweetqueue(ctx, dry_run, config):
"""A command line tool for time-delaying your tweets."""
ctx.obj = {}
ctx.obj['DRYRUN'] = dry_run
# If the subcommand is "config", bypass all setup code
if ctx.invoked_subcommand == 'config':
return # depends on [control=['if'], data=[]]
# If the config file wasn't provided, attempt to load the default one.
if config is None:
user_home = os.path.expanduser('~')
default_config = os.path.join(user_home, '.tweetqueue')
if not os.path.isfile(default_config):
click.echo('Default configuration was not found and none was provided.')
click.echo("Run 'tweetqueue config' to create one.")
ctx.exit(1) # depends on [control=['if'], data=[]]
config = open(default_config, 'rb') # depends on [control=['if'], data=['config']]
try:
ctx.obj['config'] = json.load(config) # depends on [control=['try'], data=[]]
except Exception as e:
click.echo('Unable to read configuration file.')
click.echo('Are you sure it is valid JSON')
click.echo('JSON Error: ' + e.message)
ctx.exit(1) # depends on [control=['except'], data=['e']]
# Verify that the config file has all required options
required_config = ['API_KEY', 'API_SECRET', 'ACCESS_TOKEN', 'ACCESS_TOKEN_SECRET', 'DATABASE_LOCATION']
if not all((key in ctx.obj['config'] for key in required_config)):
click.echo('Missing required value in config file.')
ctx.exit(1) # depends on [control=['if'], data=[]]
# Make a tweepy api object for the context
auth = tweepy.OAuthHandler(ctx.obj['config']['API_KEY'], ctx.obj['config']['API_SECRET'])
auth.set_access_token(ctx.obj['config']['ACCESS_TOKEN'], ctx.obj['config']['ACCESS_TOKEN_SECRET'])
ctx.obj['TWEEPY_API'] = tweepy.API(auth)
ctx.obj['TWEETLIST'] = TweetList(ctx.obj['config']['DATABASE_LOCATION']) |
def get_item_dicts(self, buckets=None, results=15, start=0, item_ids=None):
    """
    Returns data from the catalog; also expanded for the requested buckets.

    Kwargs:
        buckets (list): A list of strings specifying which buckets to retrieve
        results (int): An integer number of results to return
        start (int): An integer starting value for the result set
        item_ids (list): Specific catalog item ids to read

    Returns:
        A ResultList of dicts representing objects in the catalog; the
        list carries additional attributes 'start' and 'total'. When
        item_ids are given, 'start' is 0 and 'total' is the number of
        items returned; otherwise both come from the catalog response.
    """
    params = {
        'bucket': buckets or [],
        'item_id': item_ids or [],
    }
    response = self.get_attribute("read", results=results, start=start, **params)
    catalog = response['catalog']
    items = ResultList(catalog['items'])
    if item_ids:
        items.start = 0
        items.total = len(catalog['items'])
    else:
        items.start = catalog['start']
        items.total = catalog['total']
    return items
constant[
Returns data from the catalog; also expanded for the requested buckets
Args:
Kwargs:
buckets (list): A list of strings specifying which buckets to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of dicts representing objects in the catalog; list has additional attributes 'start' and 'total'
Example:
>>> c
<catalog - my_songs>
>>> c.read_items(results=1)
[
{
"artist_id": "AR78KRI1187B98E6F2",
"artist_name": "Art of Noise",
"date_added": "2012-04-02T16:50:02",
"foreign_id": "CAHLYLR13674D1CF83:song:1000",
"request": {
"artist_name": "The Art Of Noise",
"item_id": "1000",
"song_name": "Love"
},
"song_id": "SOSBCTO1311AFE7AE0",
"song_name": "Love"
}
]
]
variable[kwargs] assign[=] dictionary[[], []]
call[name[kwargs]][constant[bucket]] assign[=] <ast.BoolOp object at 0x7da1b04a6950>
call[name[kwargs]][constant[item_id]] assign[=] <ast.BoolOp object at 0x7da1b04a6fe0>
variable[response] assign[=] call[name[self].get_attribute, parameter[constant[read]]]
variable[rval] assign[=] call[name[ResultList], parameter[call[call[name[response]][constant[catalog]]][constant[items]]]]
if name[item_ids] begin[:]
name[rval].start assign[=] constant[0]
name[rval].total assign[=] call[name[len], parameter[call[call[name[response]][constant[catalog]]][constant[items]]]]
return[name[rval]] | keyword[def] identifier[get_item_dicts] ( identifier[self] , identifier[buckets] = keyword[None] , identifier[results] = literal[int] , identifier[start] = literal[int] , identifier[item_ids] = keyword[None] ):
literal[string]
identifier[kwargs] ={}
identifier[kwargs] [ literal[string] ]= identifier[buckets] keyword[or] []
identifier[kwargs] [ literal[string] ]= identifier[item_ids] keyword[or] []
identifier[response] = identifier[self] . identifier[get_attribute] ( literal[string] , identifier[results] = identifier[results] , identifier[start] = identifier[start] ,** identifier[kwargs] )
identifier[rval] = identifier[ResultList] ( identifier[response] [ literal[string] ][ literal[string] ])
keyword[if] identifier[item_ids] :
identifier[rval] . identifier[start] = literal[int] ;
identifier[rval] . identifier[total] = identifier[len] ( identifier[response] [ literal[string] ][ literal[string] ])
keyword[else] :
identifier[rval] . identifier[start] = identifier[response] [ literal[string] ][ literal[string] ]
identifier[rval] . identifier[total] = identifier[response] [ literal[string] ][ literal[string] ]
keyword[return] identifier[rval] | def get_item_dicts(self, buckets=None, results=15, start=0, item_ids=None):
"""
Returns data from the catalog; also expanded for the requested buckets
Args:
Kwargs:
buckets (list): A list of strings specifying which buckets to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of dicts representing objects in the catalog; list has additional attributes 'start' and 'total'
Example:
>>> c
<catalog - my_songs>
>>> c.read_items(results=1)
[
{
"artist_id": "AR78KRI1187B98E6F2",
"artist_name": "Art of Noise",
"date_added": "2012-04-02T16:50:02",
"foreign_id": "CAHLYLR13674D1CF83:song:1000",
"request": {
"artist_name": "The Art Of Noise",
"item_id": "1000",
"song_name": "Love"
},
"song_id": "SOSBCTO1311AFE7AE0",
"song_name": "Love"
}
]
"""
kwargs = {}
kwargs['bucket'] = buckets or []
kwargs['item_id'] = item_ids or []
response = self.get_attribute('read', results=results, start=start, **kwargs)
rval = ResultList(response['catalog']['items'])
if item_ids:
rval.start = 0
rval.total = len(response['catalog']['items']) # depends on [control=['if'], data=[]]
else:
rval.start = response['catalog']['start']
rval.total = response['catalog']['total']
return rval |
async def connect_model(self, model_name=None):
    """Connect to a model by name. If either controller or model
    parts of the name are empty, the current controller and/or model
    will be used.

    :param str model_name: Model reference in ``<controller>:<model>``
        form; either part may be empty.
    :raises JujuConnectionError: if the name cannot be parsed, the
        controller is unknown, or the model has no local record.
    """
    try:
        # Resolve the (possibly partial) name against the local juju
        # client data; fills in the current controller/model as needed.
        controller_name, model_name = self.jujudata.parse_model(model_name)
        controller = self.jujudata.controllers().get(controller_name)
    except JujuError as e:
        # NOTE(review): assumes JujuError exposes a .message attribute
        # -- confirm against the JujuError definition.
        raise JujuConnectionError(e.message) from e
    if controller is None:
        raise JujuConnectionError('Controller {} not found'.format(
            controller_name))
    # TODO change Connection so we can pass all the endpoints
    # instead of just the first one.
    endpoint = controller['api-endpoints'][0]
    # Credentials and model records come from the locally cached juju
    # data; missing entries default to empty dicts.
    account = self.jujudata.accounts().get(controller_name, {})
    models = self.jujudata.models().get(controller_name, {}).get('models',
                                                                 {})
    if model_name not in models:
        raise JujuConnectionError('Model not found: {}'.format(model_name))
    # TODO if there's no record for the required model name, connect
    # to the controller to find out the model's uuid, then connect
    # to that. This will let connect_model work with models that
    # haven't necessarily synced with the local juju data,
    # and also remove the need for base.CleanModel to
    # subclass JujuData.
    await self.connect(
        endpoint=endpoint,
        uuid=models[model_name]['uuid'],
        username=account.get('user'),
        password=account.get('password'),
        cacert=controller.get('ca-cert'),
        bakery_client=self.bakery_client_for_controller(controller_name),
    )
    # Remember where we connected for later introspection.
    self.controller_name = controller_name
    self.model_name = controller_name + ':' + model_name
literal[string]
keyword[try] :
identifier[controller_name] , identifier[model_name] = identifier[self] . identifier[jujudata] . identifier[parse_model] ( identifier[model_name] )
identifier[controller] = identifier[self] . identifier[jujudata] . identifier[controllers] (). identifier[get] ( identifier[controller_name] )
keyword[except] identifier[JujuError] keyword[as] identifier[e] :
keyword[raise] identifier[JujuConnectionError] ( identifier[e] . identifier[message] ) keyword[from] identifier[e]
keyword[if] identifier[controller] keyword[is] keyword[None] :
keyword[raise] identifier[JujuConnectionError] ( literal[string] . identifier[format] (
identifier[controller_name] ))
identifier[endpoint] = identifier[controller] [ literal[string] ][ literal[int] ]
identifier[account] = identifier[self] . identifier[jujudata] . identifier[accounts] (). identifier[get] ( identifier[controller_name] ,{})
identifier[models] = identifier[self] . identifier[jujudata] . identifier[models] (). identifier[get] ( identifier[controller_name] ,{}). identifier[get] ( literal[string] ,
{})
keyword[if] identifier[model_name] keyword[not] keyword[in] identifier[models] :
keyword[raise] identifier[JujuConnectionError] ( literal[string] . identifier[format] ( identifier[model_name] ))
keyword[await] identifier[self] . identifier[connect] (
identifier[endpoint] = identifier[endpoint] ,
identifier[uuid] = identifier[models] [ identifier[model_name] ][ literal[string] ],
identifier[username] = identifier[account] . identifier[get] ( literal[string] ),
identifier[password] = identifier[account] . identifier[get] ( literal[string] ),
identifier[cacert] = identifier[controller] . identifier[get] ( literal[string] ),
identifier[bakery_client] = identifier[self] . identifier[bakery_client_for_controller] ( identifier[controller_name] ),
)
identifier[self] . identifier[controller_name] = identifier[controller_name]
identifier[self] . identifier[model_name] = identifier[controller_name] + literal[string] + identifier[model_name] | async def connect_model(self, model_name=None):
"""Connect to a model by name. If either controller or model
parts of the name are empty, the current controller and/or model
will be used.
:param str model: <controller>:<model>
"""
try:
(controller_name, model_name) = self.jujudata.parse_model(model_name)
controller = self.jujudata.controllers().get(controller_name) # depends on [control=['try'], data=[]]
except JujuError as e:
raise JujuConnectionError(e.message) from e # depends on [control=['except'], data=['e']]
if controller is None:
raise JujuConnectionError('Controller {} not found'.format(controller_name)) # depends on [control=['if'], data=[]]
# TODO change Connection so we can pass all the endpoints
# instead of just the first one.
endpoint = controller['api-endpoints'][0]
account = self.jujudata.accounts().get(controller_name, {})
models = self.jujudata.models().get(controller_name, {}).get('models', {})
if model_name not in models:
raise JujuConnectionError('Model not found: {}'.format(model_name)) # depends on [control=['if'], data=['model_name']]
# TODO if there's no record for the required model name, connect
# to the controller to find out the model's uuid, then connect
# to that. This will let connect_model work with models that
# haven't necessarily synced with the local juju data,
# and also remove the need for base.CleanModel to
# subclass JujuData.
await self.connect(endpoint=endpoint, uuid=models[model_name]['uuid'], username=account.get('user'), password=account.get('password'), cacert=controller.get('ca-cert'), bakery_client=self.bakery_client_for_controller(controller_name))
self.controller_name = controller_name
self.model_name = controller_name + ':' + model_name |
def fetchall(self):
    """
    Fetch all (remaining) rows of a query result, returning them as a
    sequence of sequences (e.g. a list of tuples).
    """
    # iter() with a sentinel keeps calling fetchone() until it returns
    # None, which marks exhaustion of the result set.
    return list(iter(self.fetchone, None))
constant[
Fetch all (remaining) rows of a query result, returning them as a sequence of
sequences (e.g. a list of tuples).
]
variable[result] assign[=] list[[]]
while constant[True] begin[:]
variable[one] assign[=] call[name[self].fetchone, parameter[]]
if compare[name[one] is constant[None]] begin[:]
break
return[name[result]] | keyword[def] identifier[fetchall] ( identifier[self] ):
literal[string]
identifier[result] =[]
keyword[while] keyword[True] :
identifier[one] = identifier[self] . identifier[fetchone] ()
keyword[if] identifier[one] keyword[is] keyword[None] :
keyword[break]
keyword[else] :
identifier[result] . identifier[append] ( identifier[one] )
keyword[return] identifier[result] | def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a sequence of
sequences (e.g. a list of tuples).
"""
result = []
while True:
one = self.fetchone()
if one is None:
break # depends on [control=['if'], data=[]]
else:
result.append(one) # depends on [control=['while'], data=[]]
return result |
def rarely(fn):
    """Decorator: run *fn* on only ~5% of calls.
    The wrapped function calls ``fn`` when ``in_percentage(5)`` is true
    and propagates its return value; otherwise it does nothing and
    returns None (backward compatible — the original always returned
    None).
    """
    import functools
    @functools.wraps(fn)  # keep fn's name/docstring for introspection
    def wrapped(*args, **kwargs):
        if in_percentage(5):
            # Bug fix: the original discarded fn's return value.
            return fn(*args, **kwargs)
    return wrapped
constant[
Only 5% chance of happening
]
def function[wrapped, parameter[]]:
if call[name[in_percentage], parameter[constant[5]]] begin[:]
call[name[fn], parameter[<ast.Starred object at 0x7da1b16411b0>]]
return[name[wrapped]] | keyword[def] identifier[rarely] ( identifier[fn] ):
literal[string]
keyword[def] identifier[wrapped] (* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[in_percentage] ( literal[int] ):
identifier[fn] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapped] | def rarely(fn):
"""
Only 5% chance of happening
"""
def wrapped(*args, **kwargs):
if in_percentage(5):
fn(*args, **kwargs) # depends on [control=['if'], data=[]]
return wrapped |
def setdummies(self,e):
    """Create and register all "dummy" control vertices needed for edge e.
    An edge that spans more than one rank is routed through one dummy
    vertex per intermediate rank; the vertices are stored in
    self.ctrls[e], keyed by rank.
    """
    # Edge endpoints and their layer ranks.
    v0,v1 = e.v
    r0,r1 = self.grx[v0].rank,self.grx[v1].rank
    if r0>r1:
        # Edge points "upward" — it must be one of the reversed edges
        # recorded in alt_e. Normalize so that r0 <= r1.
        assert e in self.alt_e
        v0,v1 = v1,v0
        r0,r1 = r1,r0
    if (r1-r0)>1:
        # "dummy vertices" are stored in the edge ctrl dict,
        # keyed by their rank in layers.
        ctrl=self.ctrls[e]={}
        ctrl[r0]=v0
        ctrl[r1]=v1
        # One dummy per intermediate rank between the two endpoints.
        for r in xrange(r0+1,r1):
            self.dummyctrl(r,ctrl)
constant[creates and defines all needed dummy vertices for edge e.
]
<ast.Tuple object at 0x7da1b1024550> assign[=] name[e].v
<ast.Tuple object at 0x7da1b1024700> assign[=] tuple[[<ast.Attribute object at 0x7da1b1025030>, <ast.Attribute object at 0x7da1b1025e10>]]
if compare[name[r0] greater[>] name[r1]] begin[:]
assert[compare[name[e] in name[self].alt_e]]
<ast.Tuple object at 0x7da1b1024940> assign[=] tuple[[<ast.Name object at 0x7da1b1027e50>, <ast.Name object at 0x7da1b1024eb0>]]
<ast.Tuple object at 0x7da1b1025210> assign[=] tuple[[<ast.Name object at 0x7da1b1025870>, <ast.Name object at 0x7da1b1024d30>]]
if compare[binary_operation[name[r1] - name[r0]] greater[>] constant[1]] begin[:]
variable[ctrl] assign[=] dictionary[[], []]
call[name[ctrl]][name[r0]] assign[=] name[v0]
call[name[ctrl]][name[r1]] assign[=] name[v1]
for taget[name[r]] in starred[call[name[xrange], parameter[binary_operation[name[r0] + constant[1]], name[r1]]]] begin[:]
call[name[self].dummyctrl, parameter[name[r], name[ctrl]]] | keyword[def] identifier[setdummies] ( identifier[self] , identifier[e] ):
literal[string]
identifier[v0] , identifier[v1] = identifier[e] . identifier[v]
identifier[r0] , identifier[r1] = identifier[self] . identifier[grx] [ identifier[v0] ]. identifier[rank] , identifier[self] . identifier[grx] [ identifier[v1] ]. identifier[rank]
keyword[if] identifier[r0] > identifier[r1] :
keyword[assert] identifier[e] keyword[in] identifier[self] . identifier[alt_e]
identifier[v0] , identifier[v1] = identifier[v1] , identifier[v0]
identifier[r0] , identifier[r1] = identifier[r1] , identifier[r0]
keyword[if] ( identifier[r1] - identifier[r0] )> literal[int] :
identifier[ctrl] = identifier[self] . identifier[ctrls] [ identifier[e] ]={}
identifier[ctrl] [ identifier[r0] ]= identifier[v0]
identifier[ctrl] [ identifier[r1] ]= identifier[v1]
keyword[for] identifier[r] keyword[in] identifier[xrange] ( identifier[r0] + literal[int] , identifier[r1] ):
identifier[self] . identifier[dummyctrl] ( identifier[r] , identifier[ctrl] ) | def setdummies(self, e):
"""creates and defines all needed dummy vertices for edge e.
"""
(v0, v1) = e.v
(r0, r1) = (self.grx[v0].rank, self.grx[v1].rank)
if r0 > r1:
assert e in self.alt_e
(v0, v1) = (v1, v0)
(r0, r1) = (r1, r0) # depends on [control=['if'], data=['r0', 'r1']]
if r1 - r0 > 1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl = self.ctrls[e] = {}
ctrl[r0] = v0
ctrl[r1] = v1
for r in xrange(r0 + 1, r1):
self.dummyctrl(r, ctrl) # depends on [control=['for'], data=['r']] # depends on [control=['if'], data=[]] |
def phylotree(self):
    """
    Return the C++ PhyloTree object backing this tree, rebuilding it
    lazily when the cached instance is missing or flagged dirty.
    :return: PhyloTree instance
    """
    if self._dirty or not self._phylotree:
        try:
            # Python 3 requires the newick string as bytes.
            source = self.newick.encode() if ISPY3 else self.newick
            self._phylotree = PhyloTree(source, self.rooted)
        except ValueError:
            logger.error('Couldn\'t convert to C++ PhyloTree -- are there bootstrap values?')
        # Clear the flag even on failure, matching the original contract.
        self._dirty = False
    return self._phylotree
constant[
Get the c++ PhyloTree object corresponding to this tree.
:return: PhyloTree instance
]
if <ast.BoolOp object at 0x7da18bc70f70> begin[:]
<ast.Try object at 0x7da18bc714e0>
name[self]._dirty assign[=] constant[False]
return[name[self]._phylotree] | keyword[def] identifier[phylotree] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_phylotree] keyword[or] identifier[self] . identifier[_dirty] :
keyword[try] :
keyword[if] identifier[ISPY3] :
identifier[self] . identifier[_phylotree] = identifier[PhyloTree] ( identifier[self] . identifier[newick] . identifier[encode] (), identifier[self] . identifier[rooted] )
keyword[else] :
identifier[self] . identifier[_phylotree] = identifier[PhyloTree] ( identifier[self] . identifier[newick] , identifier[self] . identifier[rooted] )
keyword[except] identifier[ValueError] :
identifier[logger] . identifier[error] ( literal[string] )
identifier[self] . identifier[_dirty] = keyword[False]
keyword[return] identifier[self] . identifier[_phylotree] | def phylotree(self):
"""
Get the c++ PhyloTree object corresponding to this tree.
:return: PhyloTree instance
"""
if not self._phylotree or self._dirty:
try:
if ISPY3:
self._phylotree = PhyloTree(self.newick.encode(), self.rooted) # depends on [control=['if'], data=[]]
else:
self._phylotree = PhyloTree(self.newick, self.rooted) # depends on [control=['try'], data=[]]
except ValueError:
logger.error("Couldn't convert to C++ PhyloTree -- are there bootstrap values?") # depends on [control=['except'], data=[]]
self._dirty = False # depends on [control=['if'], data=[]]
return self._phylotree |
def str_endswith(arr, pat, na=np.nan):
    """
    Test whether each string element ends with a pattern.
    Mirrors :meth:`str.endswith`; regular expressions are not accepted.
    Parameters
    ----------
    pat : str
        Character sequence to test for at the end of each element.
    na : object, default NaN
        Value shown for elements that are not strings.
    Returns
    -------
    Series or Index of bool
        Booleans indicating whether the pattern matches the end of each
        string element; non-string entries yield ``na``.
    See Also
    --------
    str.endswith : Python standard library string method.
    Series.str.startswith : Same as endswith, but tests the start of string.
    Series.str.contains : Tests if string element contains a pattern.
    Examples
    --------
    >>> s = pd.Series(['bat', 'bear', 'caT', np.nan])
    >>> s.str.endswith('t')
    0     True
    1    False
    2    False
    3      NaN
    dtype: object
    >>> s.str.endswith('t', na=False)
    0     True
    1    False
    2    False
    3    False
    dtype: bool
    """
    def _endswith(element):
        return element.endswith(pat)
    return _na_map(_endswith, arr, na, dtype=bool)
constant[
Test if the end of each string element matches a pattern.
Equivalent to :meth:`str.endswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the end of each string element.
See Also
--------
str.endswith : Python standard library string method.
Series.str.startswith : Same as endswith, but tests the start of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'bear', 'caT', np.nan])
>>> s
0 bat
1 bear
2 caT
3 NaN
dtype: object
>>> s.str.endswith('t')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.endswith('t', na=False)
0 True
1 False
2 False
3 False
dtype: bool
]
variable[f] assign[=] <ast.Lambda object at 0x7da20e9b1540>
return[call[name[_na_map], parameter[name[f], name[arr], name[na]]]] | keyword[def] identifier[str_endswith] ( identifier[arr] , identifier[pat] , identifier[na] = identifier[np] . identifier[nan] ):
literal[string]
identifier[f] = keyword[lambda] identifier[x] : identifier[x] . identifier[endswith] ( identifier[pat] )
keyword[return] identifier[_na_map] ( identifier[f] , identifier[arr] , identifier[na] , identifier[dtype] = identifier[bool] ) | def str_endswith(arr, pat, na=np.nan):
"""
Test if the end of each string element matches a pattern.
Equivalent to :meth:`str.endswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the end of each string element.
See Also
--------
str.endswith : Python standard library string method.
Series.str.startswith : Same as endswith, but tests the start of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'bear', 'caT', np.nan])
>>> s
0 bat
1 bear
2 caT
3 NaN
dtype: object
>>> s.str.endswith('t')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.endswith('t', na=False)
0 True
1 False
2 False
3 False
dtype: bool
"""
f = lambda x: x.endswith(pat)
return _na_map(f, arr, na, dtype=bool) |
def _fmt_structured(d):
"""Formats '{k1:v1, k2:v2}' => 'time=... pid=... k1=v1 k2=v2'
Output is lexically sorted, *except* the time and pid always
come first, to assist with human scanning of the data.
"""
timeEntry = datetime.datetime.utcnow().strftime(
"time=%Y-%m-%dT%H:%M:%S.%f-00")
pidEntry = "pid=" + str(os.getpid())
rest = sorted('='.join([str(k), str(v)])
for (k, v) in list(d.items()))
return ' '.join([timeEntry, pidEntry] + rest) | def function[_fmt_structured, parameter[d]]:
constant[Formats '{k1:v1, k2:v2}' => 'time=... pid=... k1=v1 k2=v2'
Output is lexically sorted, *except* the time and pid always
come first, to assist with human scanning of the data.
]
variable[timeEntry] assign[=] call[call[name[datetime].datetime.utcnow, parameter[]].strftime, parameter[constant[time=%Y-%m-%dT%H:%M:%S.%f-00]]]
variable[pidEntry] assign[=] binary_operation[constant[pid=] + call[name[str], parameter[call[name[os].getpid, parameter[]]]]]
variable[rest] assign[=] call[name[sorted], parameter[<ast.GeneratorExp object at 0x7da20c6abf40>]]
return[call[constant[ ].join, parameter[binary_operation[list[[<ast.Name object at 0x7da20c6a9b10>, <ast.Name object at 0x7da20c6a9960>]] + name[rest]]]]] | keyword[def] identifier[_fmt_structured] ( identifier[d] ):
literal[string]
identifier[timeEntry] = identifier[datetime] . identifier[datetime] . identifier[utcnow] (). identifier[strftime] (
literal[string] )
identifier[pidEntry] = literal[string] + identifier[str] ( identifier[os] . identifier[getpid] ())
identifier[rest] = identifier[sorted] ( literal[string] . identifier[join] ([ identifier[str] ( identifier[k] ), identifier[str] ( identifier[v] )])
keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[list] ( identifier[d] . identifier[items] ()))
keyword[return] literal[string] . identifier[join] ([ identifier[timeEntry] , identifier[pidEntry] ]+ identifier[rest] ) | def _fmt_structured(d):
"""Formats '{k1:v1, k2:v2}' => 'time=... pid=... k1=v1 k2=v2'
Output is lexically sorted, *except* the time and pid always
come first, to assist with human scanning of the data.
"""
timeEntry = datetime.datetime.utcnow().strftime('time=%Y-%m-%dT%H:%M:%S.%f-00')
pidEntry = 'pid=' + str(os.getpid())
rest = sorted(('='.join([str(k), str(v)]) for (k, v) in list(d.items())))
return ' '.join([timeEntry, pidEntry] + rest) |
def display_text(text, highlight=None, max_width=80, split_at=None):
    """Render a multi-line string in a Jupyter notebook as '<pre></pre>'
    HTML blocks. Line numbers listed in ``highlight`` get a gray
    background and bold font.
    :param text: string
    :param lines: list of lines to be highlighted.
    """
    from IPython.display import display, HTML
    if not isinstance(text, str):
        text = str(text)
    highlight = highlight if highlight else []
    split_at = split_at if split_at else max_width - 10
    style = [
        'font-size: 9pt; margin: 0pt',
        'background: #eeeeee; color: black;'
        'font-weight: bold; font-size: 9pt; margin: 0pt']
    blocks = []
    for i, line in enumerate(text.splitlines()):
        snippet = html.escape(snip_line(line, max_width, split_at))
        # Indexing with the bool: True (1) selects the highlighted style.
        blocks.append('<pre style="{}">{}</pre>'.format(
            style[i in highlight], snippet))
    display(HTML('\n'.join(blocks)))
constant[In a Jupyter notebook, takes a multi-line string, displays these lines
in '<pre></pre>' block HTML. A list of lines may be provided that should
be highlighted in the result. A highlight gives a gray background and bold
font.
:param text: string
:param lines: list of lines to be highlighted.
]
from relative_module[IPython.display] import module[display], module[HTML]
if <ast.UnaryOp object at 0x7da18eb54100> begin[:]
variable[text] assign[=] call[name[str], parameter[name[text]]]
variable[highlight] assign[=] <ast.BoolOp object at 0x7da18eb553f0>
variable[split_at] assign[=] <ast.BoolOp object at 0x7da18eb546a0>
variable[style] assign[=] list[[<ast.Constant object at 0x7da18eb57ac0>, <ast.Constant object at 0x7da18eb566e0>]]
call[name[display], parameter[call[name[HTML], parameter[call[constant[
].join, parameter[<ast.GeneratorExp object at 0x7da18eb544f0>]]]]]] | keyword[def] identifier[display_text] ( identifier[text] , identifier[highlight] = keyword[None] , identifier[max_width] = literal[int] , identifier[split_at] = keyword[None] ):
literal[string]
keyword[from] identifier[IPython] . identifier[display] keyword[import] identifier[display] , identifier[HTML]
keyword[if] keyword[not] identifier[isinstance] ( identifier[text] , identifier[str] ):
identifier[text] = identifier[str] ( identifier[text] )
identifier[highlight] = identifier[highlight] keyword[or] []
identifier[split_at] = identifier[split_at] keyword[or] identifier[max_width] - literal[int]
identifier[style] =[
literal[string] ,
literal[string]
literal[string] ]
identifier[display] ( identifier[HTML] ( literal[string] . identifier[join] (
literal[string] . identifier[format] (
identifier[style] [ identifier[i] keyword[in] identifier[highlight] ],
identifier[html] . identifier[escape] ( identifier[snip_line] ( identifier[line] , identifier[max_width] , identifier[split_at] )))
keyword[for] identifier[i] , identifier[line] keyword[in] identifier[enumerate] ( identifier[text] . identifier[splitlines] ())))) | def display_text(text, highlight=None, max_width=80, split_at=None):
"""In a Jupyter notebook, takes a multi-line string, displays these lines
in '<pre></pre>' block HTML. A list of lines may be provided that should
be highlighted in the result. A highlight gives a gray background and bold
font.
:param text: string
:param lines: list of lines to be highlighted.
"""
from IPython.display import display, HTML
if not isinstance(text, str):
text = str(text) # depends on [control=['if'], data=[]]
highlight = highlight or []
split_at = split_at or max_width - 10
style = ['font-size: 9pt; margin: 0pt', 'background: #eeeeee; color: black;font-weight: bold; font-size: 9pt; margin: 0pt']
display(HTML('\n'.join(('<pre style="{}">{}</pre>'.format(style[i in highlight], html.escape(snip_line(line, max_width, split_at))) for (i, line) in enumerate(text.splitlines()))))) |
def process_amqp_msgs(self):
    """Process AMQP queue messages.
    It connects to AMQP server and calls callbacks to process DCNM events,
    i.e. routing key containing '.cisco.dcnm.', once they arrive in the
    queue.
    Runs forever; any exception while reading or handling a message is
    logged and the loop continues.
    """
    LOG.info('Starting process_amqp_msgs...')
    while True:
        (mtd_fr, hdr_fr, body) = (None, None, None)
        try:
            if self.consume_channel:
                # Non-blocking fetch of a single message from the queue.
                (mtd_fr, hdr_fr, body) = self.consume_channel.basic_get(
                    self._dcnm_queue_name)
            if mtd_fr:
                # Queue has messages.
                LOG.info('RX message: %s', body)
                self._cb_dcnm_msg(mtd_fr, body)
                # Ack only after the callback ran, so the broker can
                # redeliver if processing raised.
                self.consume_channel.basic_ack(mtd_fr.delivery_tag)
            else:
                # Queue is empty.
                try:
                    # Connection-level sleep (services heartbeats when the
                    # AMQP library provides it); fall back to time.sleep.
                    self._conn.sleep(1)
                except AttributeError:
                    time.sleep(1)
        except Exception:
            # Catch-all keeps the consumer loop alive; the full traceback
            # is captured for the log entry below.
            exc_type, exc_value, exc_tb = sys.exc_info()
            tb_str = traceback.format_exception(exc_type,
                                                exc_value, exc_tb)
            LOG.exception("Failed to read from queue: %(queue)s "
                          "%(exc_type)s, %(exc_value)s, %(exc_tb)s.", {
                              'queue': self._dcnm_queue_name,
                              'exc_type': exc_type,
                              'exc_value': exc_value,
                              'exc_tb': tb_str})
constant[Process AMQP queue messages.
It connects to AMQP server and calls callbacks to process DCNM events,
i.e. routing key containing '.cisco.dcnm.', once they arrive in the
queue.
]
call[name[LOG].info, parameter[constant[Starting process_amqp_msgs...]]]
while constant[True] begin[:]
<ast.Tuple object at 0x7da1b1c61030> assign[=] tuple[[<ast.Constant object at 0x7da1b1c61330>, <ast.Constant object at 0x7da1b1c61e40>, <ast.Constant object at 0x7da1b1c61db0>]]
<ast.Try object at 0x7da1b1c61630> | keyword[def] identifier[process_amqp_msgs] ( identifier[self] ):
literal[string]
identifier[LOG] . identifier[info] ( literal[string] )
keyword[while] keyword[True] :
( identifier[mtd_fr] , identifier[hdr_fr] , identifier[body] )=( keyword[None] , keyword[None] , keyword[None] )
keyword[try] :
keyword[if] identifier[self] . identifier[consume_channel] :
( identifier[mtd_fr] , identifier[hdr_fr] , identifier[body] )= identifier[self] . identifier[consume_channel] . identifier[basic_get] (
identifier[self] . identifier[_dcnm_queue_name] )
keyword[if] identifier[mtd_fr] :
identifier[LOG] . identifier[info] ( literal[string] , identifier[body] )
identifier[self] . identifier[_cb_dcnm_msg] ( identifier[mtd_fr] , identifier[body] )
identifier[self] . identifier[consume_channel] . identifier[basic_ack] ( identifier[mtd_fr] . identifier[delivery_tag] )
keyword[else] :
keyword[try] :
identifier[self] . identifier[_conn] . identifier[sleep] ( literal[int] )
keyword[except] identifier[AttributeError] :
identifier[time] . identifier[sleep] ( literal[int] )
keyword[except] identifier[Exception] :
identifier[exc_type] , identifier[exc_value] , identifier[exc_tb] = identifier[sys] . identifier[exc_info] ()
identifier[tb_str] = identifier[traceback] . identifier[format_exception] ( identifier[exc_type] ,
identifier[exc_value] , identifier[exc_tb] )
identifier[LOG] . identifier[exception] ( literal[string]
literal[string] ,{
literal[string] : identifier[self] . identifier[_dcnm_queue_name] ,
literal[string] : identifier[exc_type] ,
literal[string] : identifier[exc_value] ,
literal[string] : identifier[tb_str] }) | def process_amqp_msgs(self):
"""Process AMQP queue messages.
It connects to AMQP server and calls callbacks to process DCNM events,
i.e. routing key containing '.cisco.dcnm.', once they arrive in the
queue.
"""
LOG.info('Starting process_amqp_msgs...')
while True:
(mtd_fr, hdr_fr, body) = (None, None, None)
try:
if self.consume_channel:
(mtd_fr, hdr_fr, body) = self.consume_channel.basic_get(self._dcnm_queue_name) # depends on [control=['if'], data=[]]
if mtd_fr:
# Queue has messages.
LOG.info('RX message: %s', body)
self._cb_dcnm_msg(mtd_fr, body)
self.consume_channel.basic_ack(mtd_fr.delivery_tag) # depends on [control=['if'], data=[]]
else:
# Queue is empty.
try:
self._conn.sleep(1) # depends on [control=['try'], data=[]]
except AttributeError:
time.sleep(1) # depends on [control=['except'], data=[]] # depends on [control=['try'], data=[]]
except Exception:
(exc_type, exc_value, exc_tb) = sys.exc_info()
tb_str = traceback.format_exception(exc_type, exc_value, exc_tb)
LOG.exception('Failed to read from queue: %(queue)s %(exc_type)s, %(exc_value)s, %(exc_tb)s.', {'queue': self._dcnm_queue_name, 'exc_type': exc_type, 'exc_value': exc_value, 'exc_tb': tb_str}) # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] |
def info_factory(name, libnames, headers, frameworks=None,
                 section=None, classname=None):
    """Create a system_info class.
    Parameters
    ----------
    name : str
        name of the library
    libnames : seq
        list of libraries to look for
    headers : seq
        list of headers to look for
    frameworks : seq, optional
        macOS framework names to link against when no libraries are given
    classname : str
        name of the returned class
    section : str
        section name in the site.cfg
    Returns
    -------
    a system_info-derived class with the given meta-parameters
    """
    if not classname:
        classname = '%s_info' % name
    if not section:
        section = name
    if not frameworks:
        # Bug fix: this previously assigned a misspelled local name
        # ('framesworks'), so the intended normalization never happened.
        frameworks = []
    class _ret(system_info):
        def __init__(self):
            system_info.__init__(self)
        def library_extensions(self):
            return system_info.library_extensions(self)
        def calc_info(self):
            """ Compute the informations of the library """
            if libnames:
                libs = self.get_libs('libraries', '')
                if not libs:
                    libs = libnames
                # Look for the shared library
                lib_dirs = self.get_lib_dirs()
                tmp = None
                for d in lib_dirs:
                    tmp = self.check_libs(d, libs)
                    if tmp is not None:
                        info = tmp
                        break
                if tmp is None:
                    return
                # Look for the header file
                include_dirs = self.get_include_dirs()
                inc_dir = None
                for d in include_dirs:
                    p = self.combine_paths(d, headers)
                    if p:
                        inc_dir = os.path.dirname(p[0])
                        dict_append(info, include_dirs=[d])
                        break
                if inc_dir is None:
                    log.info('  %s not found' % name)
                    return
                self.set_info(**info)
            else:
                # No libraries: look for macOS frameworks instead.
                if frameworks:
                    fargs = []
                    for f in frameworks:
                        p = "/System/Library/Frameworks/%s.framework" % f
                        if os.path.exists(p):
                            fargs.append("-framework")
                            fargs.append(f)
                    if fargs:
                        self.set_info(extra_link_args=fargs)
                return
    _ret.__name__ = classname
    _ret.section = section
    return _ret
constant[Create a system_info class.
Parameters
----------
name : str
name of the library
libnames : seq
list of libraries to look for
headers : seq
list of headers to look for
classname : str
name of the returned class
section : str
section name in the site.cfg
Returns
-------
a system_info-derived class with the given meta-parameters
]
if <ast.UnaryOp object at 0x7da1b28502e0> begin[:]
variable[classname] assign[=] binary_operation[constant[%s_info] <ast.Mod object at 0x7da2590d6920> name[name]]
if <ast.UnaryOp object at 0x7da1b28513f0> begin[:]
variable[section] assign[=] name[name]
if <ast.UnaryOp object at 0x7da1b2852620> begin[:]
variable[framesworks] assign[=] list[[]]
class class[_ret, parameter[]] begin[:]
def function[__init__, parameter[self]]:
call[name[system_info].__init__, parameter[name[self]]]
def function[library_extensions, parameter[self]]:
return[call[name[system_info].library_extensions, parameter[name[self]]]]
def function[calc_info, parameter[self]]:
constant[ Compute the informations of the library ]
if name[libnames] begin[:]
variable[libs] assign[=] call[name[self].get_libs, parameter[constant[libraries], constant[]]]
if <ast.UnaryOp object at 0x7da1b2838130> begin[:]
variable[libs] assign[=] name[libnames]
variable[lib_dirs] assign[=] call[name[self].get_lib_dirs, parameter[]]
variable[tmp] assign[=] constant[None]
for taget[name[d]] in starred[name[lib_dirs]] begin[:]
variable[tmp] assign[=] call[name[self].check_libs, parameter[name[d], name[libs]]]
if compare[name[tmp] is_not constant[None]] begin[:]
variable[info] assign[=] name[tmp]
break
if compare[name[tmp] is constant[None]] begin[:]
return[None]
variable[include_dirs] assign[=] call[name[self].get_include_dirs, parameter[]]
variable[inc_dir] assign[=] constant[None]
for taget[name[d]] in starred[name[include_dirs]] begin[:]
variable[p] assign[=] call[name[self].combine_paths, parameter[name[d], name[headers]]]
if name[p] begin[:]
variable[inc_dir] assign[=] call[name[os].path.dirname, parameter[call[name[p]][constant[0]]]]
call[name[dict_append], parameter[name[info]]]
break
if compare[name[inc_dir] is constant[None]] begin[:]
call[name[log].info, parameter[binary_operation[constant[ %s not found] <ast.Mod object at 0x7da2590d6920> name[name]]]]
return[None]
call[name[self].set_info, parameter[]]
return[None]
name[_ret].__name__ assign[=] name[classname]
name[_ret].section assign[=] name[section]
return[name[_ret]] | keyword[def] identifier[info_factory] ( identifier[name] , identifier[libnames] , identifier[headers] , identifier[frameworks] = keyword[None] ,
identifier[section] = keyword[None] , identifier[classname] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[classname] :
identifier[classname] = literal[string] % identifier[name]
keyword[if] keyword[not] identifier[section] :
identifier[section] = identifier[name]
keyword[if] keyword[not] identifier[frameworks] :
identifier[framesworks] =[]
keyword[class] identifier[_ret] ( identifier[system_info] ):
keyword[def] identifier[__init__] ( identifier[self] ):
identifier[system_info] . identifier[__init__] ( identifier[self] )
keyword[def] identifier[library_extensions] ( identifier[self] ):
keyword[return] identifier[system_info] . identifier[library_extensions] ( identifier[self] )
keyword[def] identifier[calc_info] ( identifier[self] ):
literal[string]
keyword[if] identifier[libnames] :
identifier[libs] = identifier[self] . identifier[get_libs] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[libs] :
identifier[libs] = identifier[libnames]
identifier[lib_dirs] = identifier[self] . identifier[get_lib_dirs] ()
identifier[tmp] = keyword[None]
keyword[for] identifier[d] keyword[in] identifier[lib_dirs] :
identifier[tmp] = identifier[self] . identifier[check_libs] ( identifier[d] , identifier[libs] )
keyword[if] identifier[tmp] keyword[is] keyword[not] keyword[None] :
identifier[info] = identifier[tmp]
keyword[break]
keyword[if] identifier[tmp] keyword[is] keyword[None] :
keyword[return]
identifier[include_dirs] = identifier[self] . identifier[get_include_dirs] ()
identifier[inc_dir] = keyword[None]
keyword[for] identifier[d] keyword[in] identifier[include_dirs] :
identifier[p] = identifier[self] . identifier[combine_paths] ( identifier[d] , identifier[headers] )
keyword[if] identifier[p] :
identifier[inc_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[p] [ literal[int] ])
identifier[dict_append] ( identifier[info] , identifier[include_dirs] =[ identifier[d] ])
keyword[break]
keyword[if] identifier[inc_dir] keyword[is] keyword[None] :
identifier[log] . identifier[info] ( literal[string] % identifier[name] )
keyword[return]
identifier[self] . identifier[set_info] (** identifier[info] )
keyword[else] :
keyword[if] identifier[frameworks] :
identifier[fargs] =[]
keyword[for] identifier[f] keyword[in] identifier[frameworks] :
identifier[p] = literal[string] % identifier[f]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[p] ):
identifier[fargs] . identifier[append] ( literal[string] )
identifier[fargs] . identifier[append] ( identifier[f] )
keyword[if] identifier[fargs] :
identifier[self] . identifier[set_info] ( identifier[extra_link_args] = identifier[fargs] )
keyword[return]
identifier[_ret] . identifier[__name__] = identifier[classname]
identifier[_ret] . identifier[section] = identifier[section]
keyword[return] identifier[_ret] | def info_factory(name, libnames, headers, frameworks=None, section=None, classname=None):
"""Create a system_info class.
Parameters
----------
name : str
name of the library
libnames : seq
list of libraries to look for
headers : seq
list of headers to look for
classname : str
name of the returned class
section : str
section name in the site.cfg
Returns
-------
a system_info-derived class with the given meta-parameters
"""
if not classname:
classname = '%s_info' % name # depends on [control=['if'], data=[]]
if not section:
section = name # depends on [control=['if'], data=[]]
if not frameworks:
framesworks = [] # depends on [control=['if'], data=[]]
class _ret(system_info):
def __init__(self):
system_info.__init__(self)
def library_extensions(self):
return system_info.library_extensions(self)
def calc_info(self):
""" Compute the informations of the library """
if libnames:
libs = self.get_libs('libraries', '')
if not libs:
libs = libnames # depends on [control=['if'], data=[]]
# Look for the shared library
lib_dirs = self.get_lib_dirs()
tmp = None
for d in lib_dirs:
tmp = self.check_libs(d, libs)
if tmp is not None:
info = tmp
break # depends on [control=['if'], data=['tmp']] # depends on [control=['for'], data=['d']]
if tmp is None:
return # depends on [control=['if'], data=[]]
# Look for the header file
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
p = self.combine_paths(d, headers)
if p:
inc_dir = os.path.dirname(p[0])
dict_append(info, include_dirs=[d])
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['d']]
if inc_dir is None:
log.info(' %s not found' % name)
return # depends on [control=['if'], data=[]]
self.set_info(**info) # depends on [control=['if'], data=[]]
# Look for frameworks
elif frameworks:
fargs = []
for f in frameworks:
p = '/System/Library/Frameworks/%s.framework' % f
if os.path.exists(p):
fargs.append('-framework')
fargs.append(f) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
if fargs:
self.set_info(extra_link_args=fargs) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return
_ret.__name__ = classname
_ret.section = section
return _ret |
def axes(self):
    '''A list of axes of rotation for this joint.'''
    first = self.ode_obj.getAxis1()
    second = self.ode_obj.getAxis2()
    return [np.array(first), np.array(second)]
constant[A list of axes of rotation for this joint.]
return[list[[<ast.Call object at 0x7da1b0049660>, <ast.Call object at 0x7da1b0048340>]]] | keyword[def] identifier[axes] ( identifier[self] ):
literal[string]
keyword[return] [ identifier[np] . identifier[array] ( identifier[self] . identifier[ode_obj] . identifier[getAxis1] ()),
identifier[np] . identifier[array] ( identifier[self] . identifier[ode_obj] . identifier[getAxis2] ())] | def axes(self):
"""A list of axes of rotation for this joint."""
return [np.array(self.ode_obj.getAxis1()), np.array(self.ode_obj.getAxis2())] |
def set_logxticks(self, row, column, logticks):
    """Manually specify the x-axis log tick values.
    :param row,column: specify the subplot.
    :param logticks: logarithm of the tick locations along the axis;
        e.g. [1, 2, 3] places ticks at 10, 100 and 1000.
    """
    # Delegate to the addressed subplot.
    target = self.get_subplot_at(row, column)
    target.set_logxticks(logticks)
constant[Manually specify the x-axis log tick values.
:param row,column: specify the subplot.
:param logticks: logarithm of the locations for the ticks along the
axis.
For example, if you specify [1, 2, 3], ticks will be placed at 10,
100 and 1000.
]
variable[subplot] assign[=] call[name[self].get_subplot_at, parameter[name[row], name[column]]]
call[name[subplot].set_logxticks, parameter[name[logticks]]] | keyword[def] identifier[set_logxticks] ( identifier[self] , identifier[row] , identifier[column] , identifier[logticks] ):
literal[string]
identifier[subplot] = identifier[self] . identifier[get_subplot_at] ( identifier[row] , identifier[column] )
identifier[subplot] . identifier[set_logxticks] ( identifier[logticks] ) | def set_logxticks(self, row, column, logticks):
"""Manually specify the x-axis log tick values.
:param row,column: specify the subplot.
:param logticks: logarithm of the locations for the ticks along the
axis.
For example, if you specify [1, 2, 3], ticks will be placed at 10,
100 and 1000.
"""
subplot = self.get_subplot_at(row, column)
subplot.set_logxticks(logticks) |
def _aspirate_plunger_position(self, ul):
"""Calculate axis position for a given liquid volume.
Translates the passed liquid volume to absolute coordinates
on the axis associated with this pipette.
Calibration of the pipette motor's ul-to-mm conversion is required
"""
millimeters = ul / self._ul_per_mm(ul, 'aspirate')
destination_mm = self._get_plunger_position('bottom') + millimeters
return round(destination_mm, 6) | def function[_aspirate_plunger_position, parameter[self, ul]]:
constant[Calculate axis position for a given liquid volume.
Translates the passed liquid volume to absolute coordinates
on the axis associated with this pipette.
Calibration of the pipette motor's ul-to-mm conversion is required
]
variable[millimeters] assign[=] binary_operation[name[ul] / call[name[self]._ul_per_mm, parameter[name[ul], constant[aspirate]]]]
variable[destination_mm] assign[=] binary_operation[call[name[self]._get_plunger_position, parameter[constant[bottom]]] + name[millimeters]]
return[call[name[round], parameter[name[destination_mm], constant[6]]]] | keyword[def] identifier[_aspirate_plunger_position] ( identifier[self] , identifier[ul] ):
literal[string]
identifier[millimeters] = identifier[ul] / identifier[self] . identifier[_ul_per_mm] ( identifier[ul] , literal[string] )
identifier[destination_mm] = identifier[self] . identifier[_get_plunger_position] ( literal[string] )+ identifier[millimeters]
keyword[return] identifier[round] ( identifier[destination_mm] , literal[int] ) | def _aspirate_plunger_position(self, ul):
"""Calculate axis position for a given liquid volume.
Translates the passed liquid volume to absolute coordinates
on the axis associated with this pipette.
Calibration of the pipette motor's ul-to-mm conversion is required
"""
millimeters = ul / self._ul_per_mm(ul, 'aspirate')
destination_mm = self._get_plunger_position('bottom') + millimeters
return round(destination_mm, 6) |
def open_handle(self, dwDesiredAccess = win32.PROCESS_ALL_ACCESS):
    """
    Opens a new handle to the process.
    The new handle is stored in the L{hProcess} property.
    @warn: Normally you should call L{get_handle} instead, since it's much
        "smarter" and tries to reuse handles and merge access rights.
    @type  dwDesiredAccess: int
    @param dwDesiredAccess: Desired access rights.
        Defaults to L{win32.PROCESS_ALL_ACCESS}.
        See: U{http://msdn.microsoft.com/en-us/library/windows/desktop/ms684880(v=vs.85).aspx}
    @raise WindowsError: It's not possible to open a handle to the process
        with the requested access rights. This tipically happens because
        the target process is a system process and the debugger is not
        runnning with administrative rights.
    """
    # Open the new handle first so a failure leaves the old one intact.
    newHandle = win32.OpenProcess(dwDesiredAccess, win32.FALSE,
                                  self.dwProcessId)
    # Best-effort close of the previous handle; a failure here is only
    # worth a warning, not an error.
    try:
        self.close_handle()
    except Exception:
        warnings.warn(
            "Failed to close process handle: %s" % traceback.format_exc())
    self.hProcess = newHandle
constant[
Opens a new handle to the process.
The new handle is stored in the L{hProcess} property.
@warn: Normally you should call L{get_handle} instead, since it's much
"smarter" and tries to reuse handles and merge access rights.
@type dwDesiredAccess: int
@param dwDesiredAccess: Desired access rights.
Defaults to L{win32.PROCESS_ALL_ACCESS}.
See: U{http://msdn.microsoft.com/en-us/library/windows/desktop/ms684880(v=vs.85).aspx}
@raise WindowsError: It's not possible to open a handle to the process
with the requested access rights. This tipically happens because
the target process is a system process and the debugger is not
runnning with administrative rights.
]
variable[hProcess] assign[=] call[name[win32].OpenProcess, parameter[name[dwDesiredAccess], name[win32].FALSE, name[self].dwProcessId]]
<ast.Try object at 0x7da1b08dc100>
name[self].hProcess assign[=] name[hProcess] | keyword[def] identifier[open_handle] ( identifier[self] , identifier[dwDesiredAccess] = identifier[win32] . identifier[PROCESS_ALL_ACCESS] ):
literal[string]
identifier[hProcess] = identifier[win32] . identifier[OpenProcess] ( identifier[dwDesiredAccess] , identifier[win32] . identifier[FALSE] , identifier[self] . identifier[dwProcessId] )
keyword[try] :
identifier[self] . identifier[close_handle] ()
keyword[except] identifier[Exception] :
identifier[warnings] . identifier[warn] (
literal[string] % identifier[traceback] . identifier[format_exc] ())
identifier[self] . identifier[hProcess] = identifier[hProcess] | def open_handle(self, dwDesiredAccess=win32.PROCESS_ALL_ACCESS):
"""
Opens a new handle to the process.
The new handle is stored in the L{hProcess} property.
@warn: Normally you should call L{get_handle} instead, since it's much
"smarter" and tries to reuse handles and merge access rights.
@type dwDesiredAccess: int
@param dwDesiredAccess: Desired access rights.
Defaults to L{win32.PROCESS_ALL_ACCESS}.
See: U{http://msdn.microsoft.com/en-us/library/windows/desktop/ms684880(v=vs.85).aspx}
@raise WindowsError: It's not possible to open a handle to the process
with the requested access rights. This tipically happens because
the target process is a system process and the debugger is not
runnning with administrative rights.
"""
hProcess = win32.OpenProcess(dwDesiredAccess, win32.FALSE, self.dwProcessId)
try:
self.close_handle() # depends on [control=['try'], data=[]]
except Exception:
warnings.warn('Failed to close process handle: %s' % traceback.format_exc()) # depends on [control=['except'], data=[]]
self.hProcess = hProcess |
def lon_lat_trt_pmf(matrices):
    """
    Fold full disaggregation matrices to lon / lat / TRT PMF.
    :param matrices:
        a matrix with T submatrices
    :returns:
        3d array. First dimension represents longitude histogram bins,
        second one latitude histogram bins, third one trt histogram bins.
    """
    # Fold each TRT submatrix, then move the TRT axis to the end.
    folded = [lon_lat_pmf(mat) for mat in matrices]
    return numpy.array(folded).transpose(1, 2, 0)
constant[
Fold full disaggregation matrices to lon / lat / TRT PMF.
:param matrices:
a matrix with T submatrices
:returns:
3d array. First dimension represents longitude histogram bins,
second one latitude histogram bins, third one trt histogram bins.
]
variable[res] assign[=] call[name[numpy].array, parameter[<ast.ListComp object at 0x7da207f01a80>]]
return[call[name[res].transpose, parameter[constant[1], constant[2], constant[0]]]] | keyword[def] identifier[lon_lat_trt_pmf] ( identifier[matrices] ):
literal[string]
identifier[res] = identifier[numpy] . identifier[array] ([ identifier[lon_lat_pmf] ( identifier[mat] ) keyword[for] identifier[mat] keyword[in] identifier[matrices] ])
keyword[return] identifier[res] . identifier[transpose] ( literal[int] , literal[int] , literal[int] ) | def lon_lat_trt_pmf(matrices):
"""
Fold full disaggregation matrices to lon / lat / TRT PMF.
:param matrices:
a matrix with T submatrices
:returns:
3d array. First dimension represents longitude histogram bins,
second one latitude histogram bins, third one trt histogram bins.
"""
res = numpy.array([lon_lat_pmf(mat) for mat in matrices])
return res.transpose(1, 2, 0) |
def metadata_updated_on(item):
    """Extracts and coverts the update time from a Bugzilla item.
    The timestamp is extracted from 'delta_ts' field. This date is
    converted to UNIX timestamp format. Due Bugzilla servers ignore
    the timezone on HTTP requests, it will be ignored during the
    conversion, too.
    :param item: item generated by the backend
    :returns: a UNIX timestamp
    """
    raw = item['delta_ts'][0]['__text__']
    # Force UTC regardless of any timezone parsed from the raw value.
    dt = str_to_datetime(raw).replace(tzinfo=dateutil.tz.tzutc())
    return dt.timestamp()
constant[Extracts and coverts the update time from a Bugzilla item.
The timestamp is extracted from 'delta_ts' field. This date is
converted to UNIX timestamp format. Due Bugzilla servers ignore
the timezone on HTTP requests, it will be ignored during the
conversion, too.
:param item: item generated by the backend
:returns: a UNIX timestamp
]
variable[ts] assign[=] call[call[call[name[item]][constant[delta_ts]]][constant[0]]][constant[__text__]]
variable[ts] assign[=] call[name[str_to_datetime], parameter[name[ts]]]
variable[ts] assign[=] call[name[ts].replace, parameter[]]
return[call[name[ts].timestamp, parameter[]]] | keyword[def] identifier[metadata_updated_on] ( identifier[item] ):
literal[string]
identifier[ts] = identifier[item] [ literal[string] ][ literal[int] ][ literal[string] ]
identifier[ts] = identifier[str_to_datetime] ( identifier[ts] )
identifier[ts] = identifier[ts] . identifier[replace] ( identifier[tzinfo] = identifier[dateutil] . identifier[tz] . identifier[tzutc] ())
keyword[return] identifier[ts] . identifier[timestamp] () | def metadata_updated_on(item):
"""Extracts and coverts the update time from a Bugzilla item.
The timestamp is extracted from 'delta_ts' field. This date is
converted to UNIX timestamp format. Due Bugzilla servers ignore
the timezone on HTTP requests, it will be ignored during the
conversion, too.
:param item: item generated by the backend
:returns: a UNIX timestamp
"""
ts = item['delta_ts'][0]['__text__']
ts = str_to_datetime(ts)
ts = ts.replace(tzinfo=dateutil.tz.tzutc())
return ts.timestamp() |
def get(self, schema, query=None, **kwargs):
    """
    get matching rows from the db matching filters set in query
    schema -- Schema()
    query -- Query()
    return -- list -- a list of matching dicts
    """
    # Normalize a falsy backend result (e.g. None) to an empty list.
    rows = self._get_query(self._get, schema, query, **kwargs)
    return rows if rows else []
constant[
get matching rows from the db matching filters set in query
schema -- Schema()
query -- Query()
return -- list -- a list of matching dicts
]
variable[ret] assign[=] call[name[self]._get_query, parameter[name[self]._get, name[schema], name[query]]]
if <ast.UnaryOp object at 0x7da18f09d0c0> begin[:]
variable[ret] assign[=] list[[]]
return[name[ret]] | keyword[def] identifier[get] ( identifier[self] , identifier[schema] , identifier[query] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[ret] = identifier[self] . identifier[_get_query] ( identifier[self] . identifier[_get] , identifier[schema] , identifier[query] ,** identifier[kwargs] )
keyword[if] keyword[not] identifier[ret] : identifier[ret] =[]
keyword[return] identifier[ret] | def get(self, schema, query=None, **kwargs):
"""
get matching rows from the db matching filters set in query
schema -- Schema()
query -- Query()
return -- list -- a list of matching dicts
"""
ret = self._get_query(self._get, schema, query, **kwargs)
if not ret:
ret = [] # depends on [control=['if'], data=[]]
return ret |
def _encode_key(self, obj):
"""Encodes a dictionary key - a key can only be a string in std JSON"""
if obj.__class__ is str:
return self._encode_str(obj)
if obj.__class__ is UUID:
return '"' + str(obj) + '"'
# __mm_serialize__ is called before any isinstance checks (but after exact type checks)
try:
sx_encoder = obj.__mm_serialize__
except AttributeError:
pass
else:
try:
data = sx_encoder()
except NotImplementedError:
pass
else:
return self._encode_key(data)
if isinstance(obj, UUID):
return '"' + str(obj) + '"'
if isinstance(obj, str):
return self._encode_str(obj)
# if everything else failed try the default() method and re-raise any TypeError
# exceptions as more specific "not a valid dict key" TypeErrors
try:
value = self.default(obj)
except TypeError:
raise TypeError('{!r} is not a valid dictionary key'.format(obj))
return self._encode_key(value) | def function[_encode_key, parameter[self, obj]]:
constant[Encodes a dictionary key - a key can only be a string in std JSON]
if compare[name[obj].__class__ is name[str]] begin[:]
return[call[name[self]._encode_str, parameter[name[obj]]]]
if compare[name[obj].__class__ is name[UUID]] begin[:]
return[binary_operation[binary_operation[constant["] + call[name[str], parameter[name[obj]]]] + constant["]]]
<ast.Try object at 0x7da18f721480>
if call[name[isinstance], parameter[name[obj], name[UUID]]] begin[:]
return[binary_operation[binary_operation[constant["] + call[name[str], parameter[name[obj]]]] + constant["]]]
if call[name[isinstance], parameter[name[obj], name[str]]] begin[:]
return[call[name[self]._encode_str, parameter[name[obj]]]]
<ast.Try object at 0x7da18f7234f0>
return[call[name[self]._encode_key, parameter[name[value]]]] | keyword[def] identifier[_encode_key] ( identifier[self] , identifier[obj] ):
literal[string]
keyword[if] identifier[obj] . identifier[__class__] keyword[is] identifier[str] :
keyword[return] identifier[self] . identifier[_encode_str] ( identifier[obj] )
keyword[if] identifier[obj] . identifier[__class__] keyword[is] identifier[UUID] :
keyword[return] literal[string] + identifier[str] ( identifier[obj] )+ literal[string]
keyword[try] :
identifier[sx_encoder] = identifier[obj] . identifier[__mm_serialize__]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[else] :
keyword[try] :
identifier[data] = identifier[sx_encoder] ()
keyword[except] identifier[NotImplementedError] :
keyword[pass]
keyword[else] :
keyword[return] identifier[self] . identifier[_encode_key] ( identifier[data] )
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[UUID] ):
keyword[return] literal[string] + identifier[str] ( identifier[obj] )+ literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[str] ):
keyword[return] identifier[self] . identifier[_encode_str] ( identifier[obj] )
keyword[try] :
identifier[value] = identifier[self] . identifier[default] ( identifier[obj] )
keyword[except] identifier[TypeError] :
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[obj] ))
keyword[return] identifier[self] . identifier[_encode_key] ( identifier[value] ) | def _encode_key(self, obj):
"""Encodes a dictionary key - a key can only be a string in std JSON"""
if obj.__class__ is str:
return self._encode_str(obj) # depends on [control=['if'], data=[]]
if obj.__class__ is UUID:
return '"' + str(obj) + '"' # depends on [control=['if'], data=[]]
# __mm_serialize__ is called before any isinstance checks (but after exact type checks)
try:
sx_encoder = obj.__mm_serialize__ # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
else:
try:
data = sx_encoder() # depends on [control=['try'], data=[]]
except NotImplementedError:
pass # depends on [control=['except'], data=[]]
else:
return self._encode_key(data)
if isinstance(obj, UUID):
return '"' + str(obj) + '"' # depends on [control=['if'], data=[]]
if isinstance(obj, str):
return self._encode_str(obj) # depends on [control=['if'], data=[]]
# if everything else failed try the default() method and re-raise any TypeError
# exceptions as more specific "not a valid dict key" TypeErrors
try:
value = self.default(obj) # depends on [control=['try'], data=[]]
except TypeError:
raise TypeError('{!r} is not a valid dictionary key'.format(obj)) # depends on [control=['except'], data=[]]
return self._encode_key(value) |
def skipping_window(sequence, target, n=3):
    """
    Return a sliding window with a constraint to check that
    target is inside the window.
    From http://stackoverflow.com/q/43626525/610569
    >>> list(skipping_window([1,2,3,4,5], 2, 3))
    [(1, 2, 3), (2, 3, 4)]
    """
    items = list(sequence)
    lo = 0
    while lo + n <= len(items):
        window = items[lo:lo + n]
        if target in window:
            yield tuple(window)
        lo += 1
        # Fast-forward to the next window that can contain the target;
        # no further occurrence means no more windows can match.
        try:
            lo = max(items.index(target, lo) - (n - 1), lo)
        except ValueError:
            break
constant[
Return a sliding window with a constraint to check that
target is inside the window.
From http://stackoverflow.com/q/43626525/610569
>>> list(skipping_window([1,2,3,4,5], 2, 3))
[(1, 2, 3), (2, 3, 4)]
]
<ast.Tuple object at 0x7da20e957760> assign[=] tuple[[<ast.Constant object at 0x7da20e956020>, <ast.Name object at 0x7da20e954b50>]]
variable[seq] assign[=] call[name[list], parameter[name[sequence]]]
while compare[name[stop] less_or_equal[<=] call[name[len], parameter[name[seq]]]] begin[:]
variable[subseq] assign[=] call[name[seq]][<ast.Slice object at 0x7da18c4ccac0>]
if compare[name[target] in name[subseq]] begin[:]
<ast.Yield object at 0x7da18c4ce020>
<ast.AugAssign object at 0x7da18c4cdb10>
<ast.AugAssign object at 0x7da18c4cfbb0>
<ast.Try object at 0x7da18c4cc310> | keyword[def] identifier[skipping_window] ( identifier[sequence] , identifier[target] , identifier[n] = literal[int] ):
literal[string]
identifier[start] , identifier[stop] = literal[int] , identifier[n]
identifier[seq] = identifier[list] ( identifier[sequence] )
keyword[while] identifier[stop] <= identifier[len] ( identifier[seq] ):
identifier[subseq] = identifier[seq] [ identifier[start] : identifier[stop] ]
keyword[if] identifier[target] keyword[in] identifier[subseq] :
keyword[yield] identifier[tuple] ( identifier[seq] [ identifier[start] : identifier[stop] ])
identifier[start] += literal[int]
identifier[stop] += literal[int]
keyword[try] :
identifier[start] = identifier[max] ( identifier[seq] . identifier[index] ( identifier[target] , identifier[start] )-( identifier[n] - literal[int] ), identifier[start] )
identifier[stop] = identifier[start] + identifier[n]
keyword[except] identifier[ValueError] :
keyword[break] | def skipping_window(sequence, target, n=3):
"""
Return a sliding window with a constraint to check that
target is inside the window.
From http://stackoverflow.com/q/43626525/610569
>>> list(skipping_window([1,2,3,4,5], 2, 3))
[(1, 2, 3), (2, 3, 4)]
"""
(start, stop) = (0, n)
seq = list(sequence)
while stop <= len(seq):
subseq = seq[start:stop]
if target in subseq:
yield tuple(seq[start:stop]) # depends on [control=['if'], data=[]]
start += 1
stop += 1
# Fast forwarding the start.
# Find the next window which contains the target.
try:
# `seq.index(target, start) - (n-1)` would be the next
# window where the constraint is met.
start = max(seq.index(target, start) - (n - 1), start)
stop = start + n # depends on [control=['try'], data=[]]
except ValueError:
break # depends on [control=['except'], data=[]] # depends on [control=['while'], data=['stop']] |
def get_statistics(self):
    """Get all statistics as a dictionary.
    Returns
    -------
    statistics : Dict[str, List]
    """
    stats = {}
    stats['cumulative_elapsed_time'] = self.get_cumulative_elapsed_time()
    stats['percentage'] = self.get_percentage()
    stats['n_splits'] = self.get_n_splits()
    stats['mean_per_split'] = self.get_mean_per_split()
    return stats
constant[Get all statistics as a dictionary.
Returns
-------
statistics : Dict[str, List]
]
return[dictionary[[<ast.Constant object at 0x7da1b1909930>, <ast.Constant object at 0x7da1b190bb20>, <ast.Constant object at 0x7da1b190b640>, <ast.Constant object at 0x7da1b190a6e0>], [<ast.Call object at 0x7da1b1909330>, <ast.Call object at 0x7da1b1908ee0>, <ast.Call object at 0x7da1b190a710>, <ast.Call object at 0x7da1b1908be0>]]] | keyword[def] identifier[get_statistics] ( identifier[self] ):
literal[string]
keyword[return] {
literal[string] : identifier[self] . identifier[get_cumulative_elapsed_time] (),
literal[string] : identifier[self] . identifier[get_percentage] (),
literal[string] : identifier[self] . identifier[get_n_splits] (),
literal[string] : identifier[self] . identifier[get_mean_per_split] (),
} | def get_statistics(self):
"""Get all statistics as a dictionary.
Returns
-------
statistics : Dict[str, List]
"""
return {'cumulative_elapsed_time': self.get_cumulative_elapsed_time(), 'percentage': self.get_percentage(), 'n_splits': self.get_n_splits(), 'mean_per_split': self.get_mean_per_split()} |
def warning(title="", text="", width=DEFAULT_WIDTH,
            height=DEFAULT_HEIGHT, timeout=None):
    """
    Display a simple warning
    :param text: text inside the window
    :type text: str
    :param title: title of the window
    :type title: str
    :param width: window width
    :type width: int
    :param height: window height
    :type height: int
    :param timeout: close the window after n seconds
    :type timeout: int
    """
    # A warning is just the generic dialog with the WARNING message type.
    return _simple_dialog(
        Gtk.MessageType.WARNING, text, title, width, height, timeout)
constant[
Display a simple warning
:param text: text inside the window
:type text: str
:param title: title of the window
:type title: str
:param width: window width
:type width: int
:param height: window height
:type height: int
:param timeout: close the window after n seconds
:type timeout: int
]
return[call[name[_simple_dialog], parameter[name[Gtk].MessageType.WARNING, name[text], name[title], name[width], name[height], name[timeout]]]] | keyword[def] identifier[warning] ( identifier[title] = literal[string] , identifier[text] = literal[string] , identifier[width] = identifier[DEFAULT_WIDTH] ,
identifier[height] = identifier[DEFAULT_HEIGHT] , identifier[timeout] = keyword[None] ):
literal[string]
keyword[return] identifier[_simple_dialog] ( identifier[Gtk] . identifier[MessageType] . identifier[WARNING] ,
identifier[text] , identifier[title] , identifier[width] , identifier[height] , identifier[timeout] ) | def warning(title='', text='', width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, timeout=None):
"""
Display a simple warning
:param text: text inside the window
:type text: str
:param title: title of the window
:type title: str
:param width: window width
:type width: int
:param height: window height
:type height: int
:param timeout: close the window after n seconds
:type timeout: int
"""
return _simple_dialog(Gtk.MessageType.WARNING, text, title, width, height, timeout) |
def to_xdr_object(self):
    """Creates an XDR Operation object that represents this
    :class:`ManageData`.
    """
    name_bytes = bytearray(self.data_name, encoding='utf-8')
    # A value of None means "delete the entry" and is encoded as an
    # empty optional; bytes are wrapped as-is, strings are UTF-8 encoded.
    if self.data_value is None:
        value_bytes = []
    elif isinstance(self.data_value, bytes):
        value_bytes = [bytearray(self.data_value)]
    else:
        value_bytes = [bytearray(self.data_value, 'utf-8')]
    self.body.type = Xdr.const.MANAGE_DATA
    self.body.manageDataOp = Xdr.types.ManageDataOp(name_bytes, value_bytes)
    return super(ManageData, self).to_xdr_object()
constant[Creates an XDR Operation object that represents this
:class:`ManageData`.
]
variable[data_name] assign[=] call[name[bytearray], parameter[name[self].data_name]]
if compare[name[self].data_value is_not constant[None]] begin[:]
if call[name[isinstance], parameter[name[self].data_value, name[bytes]]] begin[:]
variable[data_value] assign[=] list[[<ast.Call object at 0x7da1b16c2d40>]]
variable[manage_data_op] assign[=] call[name[Xdr].types.ManageDataOp, parameter[name[data_name], name[data_value]]]
name[self].body.type assign[=] name[Xdr].const.MANAGE_DATA
name[self].body.manageDataOp assign[=] name[manage_data_op]
return[call[call[name[super], parameter[name[ManageData], name[self]]].to_xdr_object, parameter[]]] | keyword[def] identifier[to_xdr_object] ( identifier[self] ):
literal[string]
identifier[data_name] = identifier[bytearray] ( identifier[self] . identifier[data_name] , identifier[encoding] = literal[string] )
keyword[if] identifier[self] . identifier[data_value] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[self] . identifier[data_value] , identifier[bytes] ):
identifier[data_value] =[ identifier[bytearray] ( identifier[self] . identifier[data_value] )]
keyword[else] :
identifier[data_value] =[ identifier[bytearray] ( identifier[self] . identifier[data_value] , literal[string] )]
keyword[else] :
identifier[data_value] =[]
identifier[manage_data_op] = identifier[Xdr] . identifier[types] . identifier[ManageDataOp] ( identifier[data_name] , identifier[data_value] )
identifier[self] . identifier[body] . identifier[type] = identifier[Xdr] . identifier[const] . identifier[MANAGE_DATA]
identifier[self] . identifier[body] . identifier[manageDataOp] = identifier[manage_data_op]
keyword[return] identifier[super] ( identifier[ManageData] , identifier[self] ). identifier[to_xdr_object] () | def to_xdr_object(self):
"""Creates an XDR Operation object that represents this
:class:`ManageData`.
"""
data_name = bytearray(self.data_name, encoding='utf-8')
if self.data_value is not None:
if isinstance(self.data_value, bytes):
data_value = [bytearray(self.data_value)] # depends on [control=['if'], data=[]]
else:
data_value = [bytearray(self.data_value, 'utf-8')] # depends on [control=['if'], data=[]]
else:
data_value = []
manage_data_op = Xdr.types.ManageDataOp(data_name, data_value)
self.body.type = Xdr.const.MANAGE_DATA
self.body.manageDataOp = manage_data_op
return super(ManageData, self).to_xdr_object() |
def go(func, *args, **kwargs):
    """
    Run a function in a new tasklet, like a goroutine.
    If the goroutine raises an unhandled exception (*panics*),
    the :func:`goless.on_panic` will be called,
    which by default logs the error and exits the process.
    :param args: Positional arguments to ``func``.
    :param kwargs: Keyword arguments to ``func``.
    """
    def _panic_guard(target):
        # noinspection PyBroadException
        try:
            target(*args, **kwargs)
        except:
            # Route any unhandled exception to the panic handler.
            on_panic(*_sys.exc_info())
    _be.start(_panic_guard, func)
constant[
Run a function in a new tasklet, like a goroutine.
If the goroutine raises an unhandled exception (*panics*),
the :func:`goless.on_panic` will be called,
which by default logs the error and exits the process.
:param args: Positional arguments to ``func``.
:param kwargs: Keyword arguments to ``func``.
]
def function[safe_wrapped, parameter[f]]:
<ast.Try object at 0x7da20c993070>
call[name[_be].start, parameter[name[safe_wrapped], name[func]]] | keyword[def] identifier[go] ( identifier[func] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[safe_wrapped] ( identifier[f] ):
keyword[try] :
identifier[f] (* identifier[args] ,** identifier[kwargs] )
keyword[except] :
identifier[on_panic] (* identifier[_sys] . identifier[exc_info] ())
identifier[_be] . identifier[start] ( identifier[safe_wrapped] , identifier[func] ) | def go(func, *args, **kwargs):
"""
Run a function in a new tasklet, like a goroutine.
If the goroutine raises an unhandled exception (*panics*),
the :func:`goless.on_panic` will be called,
which by default logs the error and exits the process.
:param args: Positional arguments to ``func``.
:param kwargs: Keyword arguments to ``func``.
"""
def safe_wrapped(f):
# noinspection PyBroadException
try:
f(*args, **kwargs) # depends on [control=['try'], data=[]]
except:
on_panic(*_sys.exc_info()) # depends on [control=['except'], data=[]]
_be.start(safe_wrapped, func) |
def title(self, category):
    """ Return the total printed length of this category item.
    Sums the printed width of every field in ``self.fields`` for the
    given *category*.
    """
    # Generator expression: no need to materialize an intermediate list
    # just to feed sum().
    return sum(self.getWidth(category, field) for field in self.fields)
constant[ Return the total printed length of this category item.
]
return[call[name[sum], parameter[<ast.ListComp object at 0x7da20c76d5a0>]]] | keyword[def] identifier[title] ( identifier[self] , identifier[category] ):
literal[string]
keyword[return] identifier[sum] (
[ identifier[self] . identifier[getWidth] ( identifier[category] , identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[fields] ]) | def title(self, category):
""" Return the total printed length of this category item.
"""
return sum([self.getWidth(category, x) for x in self.fields]) |
def list(self, path=None, with_metadata=False, include_partitions=False):
    '''get a list of all of the files in the repository'''
    listing = self.upstream.list(
        path,
        with_metadata=with_metadata,
        include_partitions=include_partitions)
    # Strip the '.gz' compression suffix so callers see logical file
    # names; dict comprehension replaces the manual build-up loop.
    return {key.replace('.gz', ''): value for key, value in listing.items()}
constant[get a list of all of the files in the repository]
variable[l] assign[=] call[name[self].upstream.list, parameter[name[path]]]
variable[lp] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18ede43a0>, <ast.Name object at 0x7da18ede7520>]]] in starred[call[name[l].items, parameter[]]] begin[:]
call[name[lp]][call[name[k].replace, parameter[constant[.gz], constant[]]]] assign[=] name[v]
return[name[lp]] | keyword[def] identifier[list] ( identifier[self] , identifier[path] = keyword[None] , identifier[with_metadata] = keyword[False] , identifier[include_partitions] = keyword[False] ):
literal[string]
identifier[l] = identifier[self] . identifier[upstream] . identifier[list] (
identifier[path] ,
identifier[with_metadata] = identifier[with_metadata] ,
identifier[include_partitions] = identifier[include_partitions] )
identifier[lp] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[l] . identifier[items] ():
identifier[lp] [ identifier[k] . identifier[replace] ( literal[string] , literal[string] )]= identifier[v]
keyword[return] identifier[lp] | def list(self, path=None, with_metadata=False, include_partitions=False):
"""get a list of all of the files in the repository"""
l = self.upstream.list(path, with_metadata=with_metadata, include_partitions=include_partitions)
lp = {}
for (k, v) in l.items():
lp[k.replace('.gz', '')] = v # depends on [control=['for'], data=[]]
return lp |
def find_germanet_xml_files(xml_path):
'''
Globs the XML files contained in the given directory and sorts
them into sections for import into the MongoDB database.
Arguments:
- `xml_path`: the path to the directory containing the GermaNet
XML files
'''
xml_files = sorted(glob.glob(os.path.join(xml_path, '*.xml')))
# sort out the lexical files
lex_files = [xml_file for xml_file in xml_files if
re.match(r'(adj|nomen|verben)\.',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(lex_files))
if not lex_files:
print('ERROR: cannot find lexical information files')
# sort out the GermaNet relations file
gn_rels_file = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower() == 'gn_relations.xml']
xml_files = sorted(set(xml_files) - set(gn_rels_file))
if not gn_rels_file:
print('ERROR: cannot find relations file gn_relations.xml')
gn_rels_file = None
else:
if 1 < len(gn_rels_file):
print ('WARNING: more than one relations file gn_relations.xml, '
'taking first match')
gn_rels_file = gn_rels_file[0]
# sort out the wiktionary paraphrase files
wiktionary_files = [xml_file for xml_file in xml_files if
re.match(r'wiktionaryparaphrases-',
os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(wiktionary_files))
if not wiktionary_files:
print('WARNING: cannot find wiktionary paraphrase files')
# sort out the interlingual index file
ili_files = [xml_file for xml_file in xml_files if
os.path.basename(xml_file).lower().startswith(
'interlingualindex')]
xml_files = sorted(set(xml_files) - set(ili_files))
if not ili_files:
print('WARNING: cannot find interlingual index file')
if xml_files:
print('WARNING: unrecognised xml files:', xml_files)
return lex_files, gn_rels_file, wiktionary_files, ili_files | def function[find_germanet_xml_files, parameter[xml_path]]:
constant[
Globs the XML files contained in the given directory and sorts
them into sections for import into the MongoDB database.
Arguments:
- `xml_path`: the path to the directory containing the GermaNet
XML files
]
variable[xml_files] assign[=] call[name[sorted], parameter[call[name[glob].glob, parameter[call[name[os].path.join, parameter[name[xml_path], constant[*.xml]]]]]]]
variable[lex_files] assign[=] <ast.ListComp object at 0x7da1b0f2ee60>
variable[xml_files] assign[=] call[name[sorted], parameter[binary_operation[call[name[set], parameter[name[xml_files]]] - call[name[set], parameter[name[lex_files]]]]]]
if <ast.UnaryOp object at 0x7da1b0f2c310> begin[:]
call[name[print], parameter[constant[ERROR: cannot find lexical information files]]]
variable[gn_rels_file] assign[=] <ast.ListComp object at 0x7da1b0f2e470>
variable[xml_files] assign[=] call[name[sorted], parameter[binary_operation[call[name[set], parameter[name[xml_files]]] - call[name[set], parameter[name[gn_rels_file]]]]]]
if <ast.UnaryOp object at 0x7da1b0f2e650> begin[:]
call[name[print], parameter[constant[ERROR: cannot find relations file gn_relations.xml]]]
variable[gn_rels_file] assign[=] constant[None]
variable[wiktionary_files] assign[=] <ast.ListComp object at 0x7da1b0f2c610>
variable[xml_files] assign[=] call[name[sorted], parameter[binary_operation[call[name[set], parameter[name[xml_files]]] - call[name[set], parameter[name[wiktionary_files]]]]]]
if <ast.UnaryOp object at 0x7da1b0f2f0a0> begin[:]
call[name[print], parameter[constant[WARNING: cannot find wiktionary paraphrase files]]]
variable[ili_files] assign[=] <ast.ListComp object at 0x7da1b0f2e9b0>
variable[xml_files] assign[=] call[name[sorted], parameter[binary_operation[call[name[set], parameter[name[xml_files]]] - call[name[set], parameter[name[ili_files]]]]]]
if <ast.UnaryOp object at 0x7da1b0f2e920> begin[:]
call[name[print], parameter[constant[WARNING: cannot find interlingual index file]]]
if name[xml_files] begin[:]
call[name[print], parameter[constant[WARNING: unrecognised xml files:], name[xml_files]]]
return[tuple[[<ast.Name object at 0x7da1b10c6260>, <ast.Name object at 0x7da1b10c56c0>, <ast.Name object at 0x7da1b10c7a00>, <ast.Name object at 0x7da1b10c7640>]]] | keyword[def] identifier[find_germanet_xml_files] ( identifier[xml_path] ):
literal[string]
identifier[xml_files] = identifier[sorted] ( identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[xml_path] , literal[string] )))
identifier[lex_files] =[ identifier[xml_file] keyword[for] identifier[xml_file] keyword[in] identifier[xml_files] keyword[if]
identifier[re] . identifier[match] ( literal[string] ,
identifier[os] . identifier[path] . identifier[basename] ( identifier[xml_file] ). identifier[lower] ())]
identifier[xml_files] = identifier[sorted] ( identifier[set] ( identifier[xml_files] )- identifier[set] ( identifier[lex_files] ))
keyword[if] keyword[not] identifier[lex_files] :
identifier[print] ( literal[string] )
identifier[gn_rels_file] =[ identifier[xml_file] keyword[for] identifier[xml_file] keyword[in] identifier[xml_files] keyword[if]
identifier[os] . identifier[path] . identifier[basename] ( identifier[xml_file] ). identifier[lower] ()== literal[string] ]
identifier[xml_files] = identifier[sorted] ( identifier[set] ( identifier[xml_files] )- identifier[set] ( identifier[gn_rels_file] ))
keyword[if] keyword[not] identifier[gn_rels_file] :
identifier[print] ( literal[string] )
identifier[gn_rels_file] = keyword[None]
keyword[else] :
keyword[if] literal[int] < identifier[len] ( identifier[gn_rels_file] ):
identifier[print] ( literal[string]
literal[string] )
identifier[gn_rels_file] = identifier[gn_rels_file] [ literal[int] ]
identifier[wiktionary_files] =[ identifier[xml_file] keyword[for] identifier[xml_file] keyword[in] identifier[xml_files] keyword[if]
identifier[re] . identifier[match] ( literal[string] ,
identifier[os] . identifier[path] . identifier[basename] ( identifier[xml_file] ). identifier[lower] ())]
identifier[xml_files] = identifier[sorted] ( identifier[set] ( identifier[xml_files] )- identifier[set] ( identifier[wiktionary_files] ))
keyword[if] keyword[not] identifier[wiktionary_files] :
identifier[print] ( literal[string] )
identifier[ili_files] =[ identifier[xml_file] keyword[for] identifier[xml_file] keyword[in] identifier[xml_files] keyword[if]
identifier[os] . identifier[path] . identifier[basename] ( identifier[xml_file] ). identifier[lower] (). identifier[startswith] (
literal[string] )]
identifier[xml_files] = identifier[sorted] ( identifier[set] ( identifier[xml_files] )- identifier[set] ( identifier[ili_files] ))
keyword[if] keyword[not] identifier[ili_files] :
identifier[print] ( literal[string] )
keyword[if] identifier[xml_files] :
identifier[print] ( literal[string] , identifier[xml_files] )
keyword[return] identifier[lex_files] , identifier[gn_rels_file] , identifier[wiktionary_files] , identifier[ili_files] | def find_germanet_xml_files(xml_path):
"""
Globs the XML files contained in the given directory and sorts
them into sections for import into the MongoDB database.
Arguments:
- `xml_path`: the path to the directory containing the GermaNet
XML files
"""
xml_files = sorted(glob.glob(os.path.join(xml_path, '*.xml')))
# sort out the lexical files
lex_files = [xml_file for xml_file in xml_files if re.match('(adj|nomen|verben)\\.', os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(lex_files))
if not lex_files:
print('ERROR: cannot find lexical information files') # depends on [control=['if'], data=[]]
# sort out the GermaNet relations file
gn_rels_file = [xml_file for xml_file in xml_files if os.path.basename(xml_file).lower() == 'gn_relations.xml']
xml_files = sorted(set(xml_files) - set(gn_rels_file))
if not gn_rels_file:
print('ERROR: cannot find relations file gn_relations.xml')
gn_rels_file = None # depends on [control=['if'], data=[]]
else:
if 1 < len(gn_rels_file):
print('WARNING: more than one relations file gn_relations.xml, taking first match') # depends on [control=['if'], data=[]]
gn_rels_file = gn_rels_file[0]
# sort out the wiktionary paraphrase files
wiktionary_files = [xml_file for xml_file in xml_files if re.match('wiktionaryparaphrases-', os.path.basename(xml_file).lower())]
xml_files = sorted(set(xml_files) - set(wiktionary_files))
if not wiktionary_files:
print('WARNING: cannot find wiktionary paraphrase files') # depends on [control=['if'], data=[]]
# sort out the interlingual index file
ili_files = [xml_file for xml_file in xml_files if os.path.basename(xml_file).lower().startswith('interlingualindex')]
xml_files = sorted(set(xml_files) - set(ili_files))
if not ili_files:
print('WARNING: cannot find interlingual index file') # depends on [control=['if'], data=[]]
if xml_files:
print('WARNING: unrecognised xml files:', xml_files) # depends on [control=['if'], data=[]]
return (lex_files, gn_rels_file, wiktionary_files, ili_files) |
def medianscore(inlist):
"""
Returns the 'middle' score of the passed list. If there is an even
number of scores, the mean of the 2 middle scores is returned.
Usage: lmedianscore(inlist)
"""
newlist = copy.deepcopy(inlist)
newlist.sort()
if len(newlist) % 2 == 0: # if even number of scores, average middle 2
index = len(newlist) / 2 # integer division correct
median = float(newlist[index] + newlist[index - 1]) / 2
else:
index = len(newlist) / 2 # int divsion gives mid value when count from 0
median = newlist[index]
return median | def function[medianscore, parameter[inlist]]:
constant[
Returns the 'middle' score of the passed list. If there is an even
number of scores, the mean of the 2 middle scores is returned.
Usage: lmedianscore(inlist)
]
variable[newlist] assign[=] call[name[copy].deepcopy, parameter[name[inlist]]]
call[name[newlist].sort, parameter[]]
if compare[binary_operation[call[name[len], parameter[name[newlist]]] <ast.Mod object at 0x7da2590d6920> constant[2]] equal[==] constant[0]] begin[:]
variable[index] assign[=] binary_operation[call[name[len], parameter[name[newlist]]] / constant[2]]
variable[median] assign[=] binary_operation[call[name[float], parameter[binary_operation[call[name[newlist]][name[index]] + call[name[newlist]][binary_operation[name[index] - constant[1]]]]]] / constant[2]]
return[name[median]] | keyword[def] identifier[medianscore] ( identifier[inlist] ):
literal[string]
identifier[newlist] = identifier[copy] . identifier[deepcopy] ( identifier[inlist] )
identifier[newlist] . identifier[sort] ()
keyword[if] identifier[len] ( identifier[newlist] )% literal[int] == literal[int] :
identifier[index] = identifier[len] ( identifier[newlist] )/ literal[int]
identifier[median] = identifier[float] ( identifier[newlist] [ identifier[index] ]+ identifier[newlist] [ identifier[index] - literal[int] ])/ literal[int]
keyword[else] :
identifier[index] = identifier[len] ( identifier[newlist] )/ literal[int]
identifier[median] = identifier[newlist] [ identifier[index] ]
keyword[return] identifier[median] | def medianscore(inlist):
"""
Returns the 'middle' score of the passed list. If there is an even
number of scores, the mean of the 2 middle scores is returned.
Usage: lmedianscore(inlist)
"""
newlist = copy.deepcopy(inlist)
newlist.sort()
if len(newlist) % 2 == 0: # if even number of scores, average middle 2
index = len(newlist) / 2 # integer division correct
median = float(newlist[index] + newlist[index - 1]) / 2 # depends on [control=['if'], data=[]]
else:
index = len(newlist) / 2 # int divsion gives mid value when count from 0
median = newlist[index]
return median |
def checkPrediction2(possibleOutcome, predictedOutcome):
"""
:param possibleOutcome: list of all possible outcomes
:param predictedOutcome: list of all predicted outomes
:return missN: number of misses (a possible outcome not predicted)
fpN: number of false positives (a predicted outcome is not possible to happen)
"""
missN = 0
for i in xrange(len(possibleOutcome)):
miss = 1
for j in xrange(len(predictedOutcome)):
if predictedOutcome[j] == possibleOutcome[i]:
miss = 0
missN += miss
fpN = 0
for i in xrange(len(predictedOutcome)):
fp = 1
for j in xrange(len(possibleOutcome)):
if predictedOutcome[i] == possibleOutcome[j]:
fp = 0
fpN += fp
return (missN, fpN) | def function[checkPrediction2, parameter[possibleOutcome, predictedOutcome]]:
constant[
:param possibleOutcome: list of all possible outcomes
:param predictedOutcome: list of all predicted outomes
:return missN: number of misses (a possible outcome not predicted)
fpN: number of false positives (a predicted outcome is not possible to happen)
]
variable[missN] assign[=] constant[0]
for taget[name[i]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[possibleOutcome]]]]]] begin[:]
variable[miss] assign[=] constant[1]
for taget[name[j]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[predictedOutcome]]]]]] begin[:]
if compare[call[name[predictedOutcome]][name[j]] equal[==] call[name[possibleOutcome]][name[i]]] begin[:]
variable[miss] assign[=] constant[0]
<ast.AugAssign object at 0x7da1b08a32e0>
variable[fpN] assign[=] constant[0]
for taget[name[i]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[predictedOutcome]]]]]] begin[:]
variable[fp] assign[=] constant[1]
for taget[name[j]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[possibleOutcome]]]]]] begin[:]
if compare[call[name[predictedOutcome]][name[i]] equal[==] call[name[possibleOutcome]][name[j]]] begin[:]
variable[fp] assign[=] constant[0]
<ast.AugAssign object at 0x7da1b09018a0>
return[tuple[[<ast.Name object at 0x7da1b09004c0>, <ast.Name object at 0x7da1b09021d0>]]] | keyword[def] identifier[checkPrediction2] ( identifier[possibleOutcome] , identifier[predictedOutcome] ):
literal[string]
identifier[missN] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[len] ( identifier[possibleOutcome] )):
identifier[miss] = literal[int]
keyword[for] identifier[j] keyword[in] identifier[xrange] ( identifier[len] ( identifier[predictedOutcome] )):
keyword[if] identifier[predictedOutcome] [ identifier[j] ]== identifier[possibleOutcome] [ identifier[i] ]:
identifier[miss] = literal[int]
identifier[missN] += identifier[miss]
identifier[fpN] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[len] ( identifier[predictedOutcome] )):
identifier[fp] = literal[int]
keyword[for] identifier[j] keyword[in] identifier[xrange] ( identifier[len] ( identifier[possibleOutcome] )):
keyword[if] identifier[predictedOutcome] [ identifier[i] ]== identifier[possibleOutcome] [ identifier[j] ]:
identifier[fp] = literal[int]
identifier[fpN] += identifier[fp]
keyword[return] ( identifier[missN] , identifier[fpN] ) | def checkPrediction2(possibleOutcome, predictedOutcome):
"""
:param possibleOutcome: list of all possible outcomes
:param predictedOutcome: list of all predicted outomes
:return missN: number of misses (a possible outcome not predicted)
fpN: number of false positives (a predicted outcome is not possible to happen)
"""
missN = 0
for i in xrange(len(possibleOutcome)):
miss = 1
for j in xrange(len(predictedOutcome)):
if predictedOutcome[j] == possibleOutcome[i]:
miss = 0 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']]
missN += miss # depends on [control=['for'], data=['i']]
fpN = 0
for i in xrange(len(predictedOutcome)):
fp = 1
for j in xrange(len(possibleOutcome)):
if predictedOutcome[i] == possibleOutcome[j]:
fp = 0 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']]
fpN += fp # depends on [control=['for'], data=['i']]
return (missN, fpN) |
def cmd_reload_global(self, plname):
"""reload_global `plname`
Reload the *global* plugin named `plname`. You should close
all instances of the plugin before attempting to reload.
"""
gpmon = self.fv.gpmon
p_info = gpmon.get_plugin_info(plname)
gpmon.stop_plugin(p_info)
self.fv.update_pending(0.5)
self.fv.mm.load_module(plname)
gpmon.reload_plugin(plname)
self.fv.start_global_plugin(plname)
return True | def function[cmd_reload_global, parameter[self, plname]]:
constant[reload_global `plname`
Reload the *global* plugin named `plname`. You should close
all instances of the plugin before attempting to reload.
]
variable[gpmon] assign[=] name[self].fv.gpmon
variable[p_info] assign[=] call[name[gpmon].get_plugin_info, parameter[name[plname]]]
call[name[gpmon].stop_plugin, parameter[name[p_info]]]
call[name[self].fv.update_pending, parameter[constant[0.5]]]
call[name[self].fv.mm.load_module, parameter[name[plname]]]
call[name[gpmon].reload_plugin, parameter[name[plname]]]
call[name[self].fv.start_global_plugin, parameter[name[plname]]]
return[constant[True]] | keyword[def] identifier[cmd_reload_global] ( identifier[self] , identifier[plname] ):
literal[string]
identifier[gpmon] = identifier[self] . identifier[fv] . identifier[gpmon]
identifier[p_info] = identifier[gpmon] . identifier[get_plugin_info] ( identifier[plname] )
identifier[gpmon] . identifier[stop_plugin] ( identifier[p_info] )
identifier[self] . identifier[fv] . identifier[update_pending] ( literal[int] )
identifier[self] . identifier[fv] . identifier[mm] . identifier[load_module] ( identifier[plname] )
identifier[gpmon] . identifier[reload_plugin] ( identifier[plname] )
identifier[self] . identifier[fv] . identifier[start_global_plugin] ( identifier[plname] )
keyword[return] keyword[True] | def cmd_reload_global(self, plname):
"""reload_global `plname`
Reload the *global* plugin named `plname`. You should close
all instances of the plugin before attempting to reload.
"""
gpmon = self.fv.gpmon
p_info = gpmon.get_plugin_info(plname)
gpmon.stop_plugin(p_info)
self.fv.update_pending(0.5)
self.fv.mm.load_module(plname)
gpmon.reload_plugin(plname)
self.fv.start_global_plugin(plname)
return True |
def getTargetFnDef(node,path,fdefs,cdefs,imp_funcs,imp_mods,imp_classes):
''' Return the function node that the input call node targets.
Note that cases 2b and 2c might make false matches. If two classes are
imported by the same program and they both have a method with an identical
name, then class.method() will be associated with the first class in
imp_classes.'''
#CASE 1: calling function inside namespace, like foo(x) or randint(x,y)
if isinstance(node.func,ast.Name):
# CASE 1A: calling an infile function
if path in fdefs:
for x in fdefs[path]:
if node.func.id == x.name:
return x
# CASE 1B: calling an imported function
if path in imp_funcs:
for x in imp_funcs[path]:
if node.func.id == x.name:
return x
# CASE 1C: infile object instantiation, i.e. a=Car()
if path in cdefs:
for x in cdefs[path]:
if node.func.id == x.name:
classfuncs = [y for y in fdefs[path] if y.pclass==x]
initfuncs = [z for z in classfuncs if z.name=='__init__']
if initfuncs:
return initfuncs[0]
# CASE 1D: imported object instantiation, from exe import car; a=Car()
if path in imp_classes:
for x in imp_classes[path]:
if node.func.id == x.name:
classfuncs = [y for y in fdefs[x.path] if y.pclass==x]
initfuncs = [z for z in classfuncs if z.name=='__init__']
if initfuncs:
return initfuncs[0]
return None # builtin functions
# CASE 2: calling function outside namespace, like random.randint(x,y)
elif isinstance(node.func,ast.Attribute):
try:
obj = node.func.value.id
method = node.func.attr
except AttributeError:
return None #weird string thingies
if obj == 'self':
return None #setting attrs in class def.
# CASE 2A: calling imported module.function
for modpath in imp_mods[path]:
if not modpath:
continue
elif obj+'.py' in modpath:
if modpath in fdefs:
matches = [x for x in fdefs[modpath] if x.name==method]
if matches:
if len(matches)>1:
pass
#print("multiple matches found for "+method)
return matches[0]
# CASE 2B: object instantiation with an imported module
if modpath not in cdefs:
continue
for clss in cdefs[modpath]:
if clss.name==method:
classfuncs = [y for y in fdefs[clss.path] if y.pclass==clss]
initfuncs = [z for z in classfuncs if z.name=='__init__']
if initfuncs:
return initfuncs[0]
# CASE 2C: calling infile class.method
if path in cdefs:
for clss in cdefs[path]:
for x in fdefs[clss.path]:
if x.pclass==clss:
return x
# CASE 2D: calling imported class.method
if path in imp_classes:
for clss in imp_classes[path]:
for x in fdefs[clss.path]:
if x.pclass==clss:
if x.name==method:
return x
return None | def function[getTargetFnDef, parameter[node, path, fdefs, cdefs, imp_funcs, imp_mods, imp_classes]]:
constant[ Return the function node that the input call node targets.
Note that cases 2b and 2c might make false matches. If two classes are
imported by the same program and they both have a method with an identical
name, then class.method() will be associated with the first class in
imp_classes.]
if call[name[isinstance], parameter[name[node].func, name[ast].Name]] begin[:]
if compare[name[path] in name[fdefs]] begin[:]
for taget[name[x]] in starred[call[name[fdefs]][name[path]]] begin[:]
if compare[name[node].func.id equal[==] name[x].name] begin[:]
return[name[x]]
if compare[name[path] in name[imp_funcs]] begin[:]
for taget[name[x]] in starred[call[name[imp_funcs]][name[path]]] begin[:]
if compare[name[node].func.id equal[==] name[x].name] begin[:]
return[name[x]]
if compare[name[path] in name[cdefs]] begin[:]
for taget[name[x]] in starred[call[name[cdefs]][name[path]]] begin[:]
if compare[name[node].func.id equal[==] name[x].name] begin[:]
variable[classfuncs] assign[=] <ast.ListComp object at 0x7da1b27e8be0>
variable[initfuncs] assign[=] <ast.ListComp object at 0x7da1b27eb610>
if name[initfuncs] begin[:]
return[call[name[initfuncs]][constant[0]]]
if compare[name[path] in name[imp_classes]] begin[:]
for taget[name[x]] in starred[call[name[imp_classes]][name[path]]] begin[:]
if compare[name[node].func.id equal[==] name[x].name] begin[:]
variable[classfuncs] assign[=] <ast.ListComp object at 0x7da1b27e8130>
variable[initfuncs] assign[=] <ast.ListComp object at 0x7da1b27eb700>
if name[initfuncs] begin[:]
return[call[name[initfuncs]][constant[0]]]
return[constant[None]] | keyword[def] identifier[getTargetFnDef] ( identifier[node] , identifier[path] , identifier[fdefs] , identifier[cdefs] , identifier[imp_funcs] , identifier[imp_mods] , identifier[imp_classes] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[node] . identifier[func] , identifier[ast] . identifier[Name] ):
keyword[if] identifier[path] keyword[in] identifier[fdefs] :
keyword[for] identifier[x] keyword[in] identifier[fdefs] [ identifier[path] ]:
keyword[if] identifier[node] . identifier[func] . identifier[id] == identifier[x] . identifier[name] :
keyword[return] identifier[x]
keyword[if] identifier[path] keyword[in] identifier[imp_funcs] :
keyword[for] identifier[x] keyword[in] identifier[imp_funcs] [ identifier[path] ]:
keyword[if] identifier[node] . identifier[func] . identifier[id] == identifier[x] . identifier[name] :
keyword[return] identifier[x]
keyword[if] identifier[path] keyword[in] identifier[cdefs] :
keyword[for] identifier[x] keyword[in] identifier[cdefs] [ identifier[path] ]:
keyword[if] identifier[node] . identifier[func] . identifier[id] == identifier[x] . identifier[name] :
identifier[classfuncs] =[ identifier[y] keyword[for] identifier[y] keyword[in] identifier[fdefs] [ identifier[path] ] keyword[if] identifier[y] . identifier[pclass] == identifier[x] ]
identifier[initfuncs] =[ identifier[z] keyword[for] identifier[z] keyword[in] identifier[classfuncs] keyword[if] identifier[z] . identifier[name] == literal[string] ]
keyword[if] identifier[initfuncs] :
keyword[return] identifier[initfuncs] [ literal[int] ]
keyword[if] identifier[path] keyword[in] identifier[imp_classes] :
keyword[for] identifier[x] keyword[in] identifier[imp_classes] [ identifier[path] ]:
keyword[if] identifier[node] . identifier[func] . identifier[id] == identifier[x] . identifier[name] :
identifier[classfuncs] =[ identifier[y] keyword[for] identifier[y] keyword[in] identifier[fdefs] [ identifier[x] . identifier[path] ] keyword[if] identifier[y] . identifier[pclass] == identifier[x] ]
identifier[initfuncs] =[ identifier[z] keyword[for] identifier[z] keyword[in] identifier[classfuncs] keyword[if] identifier[z] . identifier[name] == literal[string] ]
keyword[if] identifier[initfuncs] :
keyword[return] identifier[initfuncs] [ literal[int] ]
keyword[return] keyword[None]
keyword[elif] identifier[isinstance] ( identifier[node] . identifier[func] , identifier[ast] . identifier[Attribute] ):
keyword[try] :
identifier[obj] = identifier[node] . identifier[func] . identifier[value] . identifier[id]
identifier[method] = identifier[node] . identifier[func] . identifier[attr]
keyword[except] identifier[AttributeError] :
keyword[return] keyword[None]
keyword[if] identifier[obj] == literal[string] :
keyword[return] keyword[None]
keyword[for] identifier[modpath] keyword[in] identifier[imp_mods] [ identifier[path] ]:
keyword[if] keyword[not] identifier[modpath] :
keyword[continue]
keyword[elif] identifier[obj] + literal[string] keyword[in] identifier[modpath] :
keyword[if] identifier[modpath] keyword[in] identifier[fdefs] :
identifier[matches] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[fdefs] [ identifier[modpath] ] keyword[if] identifier[x] . identifier[name] == identifier[method] ]
keyword[if] identifier[matches] :
keyword[if] identifier[len] ( identifier[matches] )> literal[int] :
keyword[pass]
keyword[return] identifier[matches] [ literal[int] ]
keyword[if] identifier[modpath] keyword[not] keyword[in] identifier[cdefs] :
keyword[continue]
keyword[for] identifier[clss] keyword[in] identifier[cdefs] [ identifier[modpath] ]:
keyword[if] identifier[clss] . identifier[name] == identifier[method] :
identifier[classfuncs] =[ identifier[y] keyword[for] identifier[y] keyword[in] identifier[fdefs] [ identifier[clss] . identifier[path] ] keyword[if] identifier[y] . identifier[pclass] == identifier[clss] ]
identifier[initfuncs] =[ identifier[z] keyword[for] identifier[z] keyword[in] identifier[classfuncs] keyword[if] identifier[z] . identifier[name] == literal[string] ]
keyword[if] identifier[initfuncs] :
keyword[return] identifier[initfuncs] [ literal[int] ]
keyword[if] identifier[path] keyword[in] identifier[cdefs] :
keyword[for] identifier[clss] keyword[in] identifier[cdefs] [ identifier[path] ]:
keyword[for] identifier[x] keyword[in] identifier[fdefs] [ identifier[clss] . identifier[path] ]:
keyword[if] identifier[x] . identifier[pclass] == identifier[clss] :
keyword[return] identifier[x]
keyword[if] identifier[path] keyword[in] identifier[imp_classes] :
keyword[for] identifier[clss] keyword[in] identifier[imp_classes] [ identifier[path] ]:
keyword[for] identifier[x] keyword[in] identifier[fdefs] [ identifier[clss] . identifier[path] ]:
keyword[if] identifier[x] . identifier[pclass] == identifier[clss] :
keyword[if] identifier[x] . identifier[name] == identifier[method] :
keyword[return] identifier[x]
keyword[return] keyword[None] | def getTargetFnDef(node, path, fdefs, cdefs, imp_funcs, imp_mods, imp_classes):
""" Return the function node that the input call node targets.
Note that cases 2b and 2c might make false matches. If two classes are
imported by the same program and they both have a method with an identical
name, then class.method() will be associated with the first class in
imp_classes."""
#CASE 1: calling function inside namespace, like foo(x) or randint(x,y)
if isinstance(node.func, ast.Name):
# CASE 1A: calling an infile function
if path in fdefs:
for x in fdefs[path]:
if node.func.id == x.name:
return x # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']] # depends on [control=['if'], data=['path', 'fdefs']]
# CASE 1B: calling an imported function
if path in imp_funcs:
for x in imp_funcs[path]:
if node.func.id == x.name:
return x # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']] # depends on [control=['if'], data=['path', 'imp_funcs']]
# CASE 1C: infile object instantiation, i.e. a=Car()
if path in cdefs:
for x in cdefs[path]:
if node.func.id == x.name:
classfuncs = [y for y in fdefs[path] if y.pclass == x]
initfuncs = [z for z in classfuncs if z.name == '__init__']
if initfuncs:
return initfuncs[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']] # depends on [control=['if'], data=['path', 'cdefs']]
# CASE 1D: imported object instantiation, from exe import car; a=Car()
if path in imp_classes:
for x in imp_classes[path]:
if node.func.id == x.name:
classfuncs = [y for y in fdefs[x.path] if y.pclass == x]
initfuncs = [z for z in classfuncs if z.name == '__init__']
if initfuncs:
return initfuncs[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']] # depends on [control=['if'], data=['path', 'imp_classes']]
return None # builtin functions # depends on [control=['if'], data=[]]
# CASE 2: calling function outside namespace, like random.randint(x,y)
elif isinstance(node.func, ast.Attribute):
try:
obj = node.func.value.id
method = node.func.attr # depends on [control=['try'], data=[]]
except AttributeError:
return None #weird string thingies # depends on [control=['except'], data=[]]
if obj == 'self':
return None #setting attrs in class def. # depends on [control=['if'], data=[]]
# CASE 2A: calling imported module.function
for modpath in imp_mods[path]:
if not modpath:
continue # depends on [control=['if'], data=[]]
elif obj + '.py' in modpath:
if modpath in fdefs:
matches = [x for x in fdefs[modpath] if x.name == method]
if matches:
if len(matches) > 1:
pass # depends on [control=['if'], data=[]]
#print("multiple matches found for "+method)
return matches[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['modpath', 'fdefs']] # depends on [control=['if'], data=['modpath']]
# CASE 2B: object instantiation with an imported module
if modpath not in cdefs:
continue # depends on [control=['if'], data=[]]
for clss in cdefs[modpath]:
if clss.name == method:
classfuncs = [y for y in fdefs[clss.path] if y.pclass == clss]
initfuncs = [z for z in classfuncs if z.name == '__init__']
if initfuncs:
return initfuncs[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['clss']] # depends on [control=['for'], data=['modpath']] # CASE 2C: calling infile class.method
if path in cdefs:
for clss in cdefs[path]:
for x in fdefs[clss.path]:
if x.pclass == clss:
return x # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['clss']] # depends on [control=['if'], data=['path', 'cdefs']] # CASE 2D: calling imported class.method
if path in imp_classes:
for clss in imp_classes[path]:
for x in fdefs[clss.path]:
if x.pclass == clss:
if x.name == method:
return x # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['clss']] # depends on [control=['if'], data=['path', 'imp_classes']]
return None # depends on [control=['if'], data=[]] |
def get_file_extension(self, filepath):
"""
This method check mimetype to define file extension.
If it can't, it use original-format metadata.
"""
mtype = magic.from_file(filepath, mime=True)
if type(mtype) == bytes:
mtype = mtype.decode("utf-8")
if mtype == "audio/mpeg":
ext = ".mp3"
elif mtype == "audio/x-wav":
ext = ".wav"
else:
ext = "." + self.get("original-format")
return ext | def function[get_file_extension, parameter[self, filepath]]:
constant[
This method check mimetype to define file extension.
If it can't, it use original-format metadata.
]
variable[mtype] assign[=] call[name[magic].from_file, parameter[name[filepath]]]
if compare[call[name[type], parameter[name[mtype]]] equal[==] name[bytes]] begin[:]
variable[mtype] assign[=] call[name[mtype].decode, parameter[constant[utf-8]]]
if compare[name[mtype] equal[==] constant[audio/mpeg]] begin[:]
variable[ext] assign[=] constant[.mp3]
return[name[ext]] | keyword[def] identifier[get_file_extension] ( identifier[self] , identifier[filepath] ):
literal[string]
identifier[mtype] = identifier[magic] . identifier[from_file] ( identifier[filepath] , identifier[mime] = keyword[True] )
keyword[if] identifier[type] ( identifier[mtype] )== identifier[bytes] :
identifier[mtype] = identifier[mtype] . identifier[decode] ( literal[string] )
keyword[if] identifier[mtype] == literal[string] :
identifier[ext] = literal[string]
keyword[elif] identifier[mtype] == literal[string] :
identifier[ext] = literal[string]
keyword[else] :
identifier[ext] = literal[string] + identifier[self] . identifier[get] ( literal[string] )
keyword[return] identifier[ext] | def get_file_extension(self, filepath):
"""
This method check mimetype to define file extension.
If it can't, it use original-format metadata.
"""
mtype = magic.from_file(filepath, mime=True)
if type(mtype) == bytes:
mtype = mtype.decode('utf-8') # depends on [control=['if'], data=[]]
if mtype == 'audio/mpeg':
ext = '.mp3' # depends on [control=['if'], data=[]]
elif mtype == 'audio/x-wav':
ext = '.wav' # depends on [control=['if'], data=[]]
else:
ext = '.' + self.get('original-format')
return ext |
    def _bindBucket(self, bucket_name, create=False, block=True, versioning=False):
        """
        Return the Boto Bucket object representing the S3 bucket with the given name. If the
        bucket does not exist and `create` is True, it will be created.
        :param str bucket_name: the name of the bucket to bind to
        :param bool create: Whether to create bucket the if it doesn't exist
        :param bool block: If False, return None if the bucket doesn't exist. If True, wait until
               bucket appears. Ignored if `create` is True.
        :param bool versioning: desired versioning state; enabled on newly created buckets and
               checked against pre-existing ones
        :rtype: Bucket|None
        :raises S3ResponseError: If `block` is True and the bucket still doesn't exist after the
                retry timeout expires.
        :raises BucketLocationConflictException: if the bucket exists in a region other than the
                one this job store is configured for
        """
        # Bucket names must satisfy the length and character constraints enforced here.
        assert self.minBucketNameLen <= len(bucket_name) <= self.maxBucketNameLen
        assert self.bucketNameRe.match(bucket_name)
        log.debug("Binding to job store bucket '%s'.", bucket_name)
        def bucket_creation_pending(e):
            # Retry predicate: these error codes were observed while a freshly
            # requested bucket was still being set up on the S3 side.
            # https://github.com/BD2KGenomics/toil/issues/955
            # https://github.com/BD2KGenomics/toil/issues/995
            # https://github.com/BD2KGenomics/toil/issues/1093
            return (isinstance(e, (S3CreateError, S3ResponseError))
                    and e.error_code in ('BucketAlreadyOwnedByYou', 'OperationAborted'))
        bucketExisted = True
        for attempt in retry_s3(predicate=bucket_creation_pending):
            with attempt:
                try:
                    # validate=True forces a round trip so a missing bucket raises here.
                    bucket = self.s3.get_bucket(bucket_name, validate=True)
                except S3ResponseError as e:
                    if e.error_code == 'NoSuchBucket':
                        bucketExisted = False
                        log.debug("Bucket '%s' does not exist.", bucket_name)
                        if create:
                            log.debug("Creating bucket '%s'.", bucket_name)
                            location = region_to_bucket_location(self.region)
                            bucket = self.s3.create_bucket(bucket_name, location=location)
                            assert self.__getBucketRegion(bucket) == self.region
                        elif block:
                            # Re-raise inside the retry context so we keep waiting
                            # for the bucket to appear (until retry_s3 gives up).
                            raise
                        else:
                            return None
                    elif e.status == 301:
                        # This is raised if the user attempts to get a bucket in a region outside
                        # the specified one, if the specified one is not `us-east-1`. The us-east-1
                        # server allows a user to use buckets from any region.
                        bucket = self.s3.get_bucket(bucket_name, validate=False)
                        raise BucketLocationConflictException(self.__getBucketRegion(bucket))
                    else:
                        raise
                else:
                    # Bucket exists: it must live in this job store's region.
                    if self.__getBucketRegion(bucket) != self.region:
                        raise BucketLocationConflictException(self.__getBucketRegion(bucket))
        if versioning and not bucketExisted:
            # only call this method on bucket creation
            bucket.configure_versioning(True)
        else:
            # now test for versioning consistency
            # we should never see any of these errors since 'versioning' should always be true
            # NOTE(review): when versioning is True and __getBucketVersioning returns None,
            # None != True makes the first assert fire, so the elif branch below appears
            # unreachable in that case — confirm intended semantics.
            bucket_versioning = self.__getBucketVersioning(bucket)
            if bucket_versioning != versioning:
                assert False, 'Cannot modify versioning on existing bucket'
            elif bucket_versioning is None:
                assert False, 'Cannot use a bucket with versioning suspended'
        if bucketExisted:
            log.debug("Using pre-existing job store bucket '%s'.", bucket_name)
        else:
            log.debug("Created new job store bucket '%s'.", bucket_name)
        return bucket
constant[
Return the Boto Bucket object representing the S3 bucket with the given name. If the
bucket does not exist and `create` is True, it will be created.
:param str bucket_name: the name of the bucket to bind to
:param bool create: Whether to create bucket the if it doesn't exist
:param bool block: If False, return None if the bucket doesn't exist. If True, wait until
bucket appears. Ignored if `create` is True.
:rtype: Bucket|None
:raises S3ResponseError: If `block` is True and the bucket still doesn't exist after the
retry timeout expires.
]
assert[compare[name[self].minBucketNameLen less_or_equal[<=] call[name[len], parameter[name[bucket_name]]]]]
assert[call[name[self].bucketNameRe.match, parameter[name[bucket_name]]]]
call[name[log].debug, parameter[constant[Binding to job store bucket '%s'.], name[bucket_name]]]
def function[bucket_creation_pending, parameter[e]]:
return[<ast.BoolOp object at 0x7da20c796410>]
variable[bucketExisted] assign[=] constant[True]
for taget[name[attempt]] in starred[call[name[retry_s3], parameter[]]] begin[:]
with name[attempt] begin[:]
<ast.Try object at 0x7da20c794a60>
if <ast.BoolOp object at 0x7da1b1eef220> begin[:]
call[name[bucket].configure_versioning, parameter[constant[True]]]
if name[bucketExisted] begin[:]
call[name[log].debug, parameter[constant[Using pre-existing job store bucket '%s'.], name[bucket_name]]]
return[name[bucket]] | keyword[def] identifier[_bindBucket] ( identifier[self] , identifier[bucket_name] , identifier[create] = keyword[False] , identifier[block] = keyword[True] , identifier[versioning] = keyword[False] ):
literal[string]
keyword[assert] identifier[self] . identifier[minBucketNameLen] <= identifier[len] ( identifier[bucket_name] )<= identifier[self] . identifier[maxBucketNameLen]
keyword[assert] identifier[self] . identifier[bucketNameRe] . identifier[match] ( identifier[bucket_name] )
identifier[log] . identifier[debug] ( literal[string] , identifier[bucket_name] )
keyword[def] identifier[bucket_creation_pending] ( identifier[e] ):
keyword[return] ( identifier[isinstance] ( identifier[e] ,( identifier[S3CreateError] , identifier[S3ResponseError] ))
keyword[and] identifier[e] . identifier[error_code] keyword[in] ( literal[string] , literal[string] ))
identifier[bucketExisted] = keyword[True]
keyword[for] identifier[attempt] keyword[in] identifier[retry_s3] ( identifier[predicate] = identifier[bucket_creation_pending] ):
keyword[with] identifier[attempt] :
keyword[try] :
identifier[bucket] = identifier[self] . identifier[s3] . identifier[get_bucket] ( identifier[bucket_name] , identifier[validate] = keyword[True] )
keyword[except] identifier[S3ResponseError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[error_code] == literal[string] :
identifier[bucketExisted] = keyword[False]
identifier[log] . identifier[debug] ( literal[string] , identifier[bucket_name] )
keyword[if] identifier[create] :
identifier[log] . identifier[debug] ( literal[string] , identifier[bucket_name] )
identifier[location] = identifier[region_to_bucket_location] ( identifier[self] . identifier[region] )
identifier[bucket] = identifier[self] . identifier[s3] . identifier[create_bucket] ( identifier[bucket_name] , identifier[location] = identifier[location] )
keyword[assert] identifier[self] . identifier[__getBucketRegion] ( identifier[bucket] )== identifier[self] . identifier[region]
keyword[elif] identifier[block] :
keyword[raise]
keyword[else] :
keyword[return] keyword[None]
keyword[elif] identifier[e] . identifier[status] == literal[int] :
identifier[bucket] = identifier[self] . identifier[s3] . identifier[get_bucket] ( identifier[bucket_name] , identifier[validate] = keyword[False] )
keyword[raise] identifier[BucketLocationConflictException] ( identifier[self] . identifier[__getBucketRegion] ( identifier[bucket] ))
keyword[else] :
keyword[raise]
keyword[else] :
keyword[if] identifier[self] . identifier[__getBucketRegion] ( identifier[bucket] )!= identifier[self] . identifier[region] :
keyword[raise] identifier[BucketLocationConflictException] ( identifier[self] . identifier[__getBucketRegion] ( identifier[bucket] ))
keyword[if] identifier[versioning] keyword[and] keyword[not] identifier[bucketExisted] :
identifier[bucket] . identifier[configure_versioning] ( keyword[True] )
keyword[else] :
identifier[bucket_versioning] = identifier[self] . identifier[__getBucketVersioning] ( identifier[bucket] )
keyword[if] identifier[bucket_versioning] != identifier[versioning] :
keyword[assert] keyword[False] , literal[string]
keyword[elif] identifier[bucket_versioning] keyword[is] keyword[None] :
keyword[assert] keyword[False] , literal[string]
keyword[if] identifier[bucketExisted] :
identifier[log] . identifier[debug] ( literal[string] , identifier[bucket_name] )
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] , identifier[bucket_name] )
keyword[return] identifier[bucket] | def _bindBucket(self, bucket_name, create=False, block=True, versioning=False):
"""
Return the Boto Bucket object representing the S3 bucket with the given name. If the
bucket does not exist and `create` is True, it will be created.
:param str bucket_name: the name of the bucket to bind to
:param bool create: Whether to create bucket the if it doesn't exist
:param bool block: If False, return None if the bucket doesn't exist. If True, wait until
bucket appears. Ignored if `create` is True.
:rtype: Bucket|None
:raises S3ResponseError: If `block` is True and the bucket still doesn't exist after the
retry timeout expires.
"""
assert self.minBucketNameLen <= len(bucket_name) <= self.maxBucketNameLen
assert self.bucketNameRe.match(bucket_name)
log.debug("Binding to job store bucket '%s'.", bucket_name)
def bucket_creation_pending(e):
# https://github.com/BD2KGenomics/toil/issues/955
# https://github.com/BD2KGenomics/toil/issues/995
# https://github.com/BD2KGenomics/toil/issues/1093
return isinstance(e, (S3CreateError, S3ResponseError)) and e.error_code in ('BucketAlreadyOwnedByYou', 'OperationAborted')
bucketExisted = True
for attempt in retry_s3(predicate=bucket_creation_pending):
with attempt:
try:
bucket = self.s3.get_bucket(bucket_name, validate=True) # depends on [control=['try'], data=[]]
except S3ResponseError as e:
if e.error_code == 'NoSuchBucket':
bucketExisted = False
log.debug("Bucket '%s' does not exist.", bucket_name)
if create:
log.debug("Creating bucket '%s'.", bucket_name)
location = region_to_bucket_location(self.region)
bucket = self.s3.create_bucket(bucket_name, location=location)
assert self.__getBucketRegion(bucket) == self.region # depends on [control=['if'], data=[]]
elif block:
raise # depends on [control=['if'], data=[]]
else:
return None # depends on [control=['if'], data=[]]
elif e.status == 301:
# This is raised if the user attempts to get a bucket in a region outside
# the specified one, if the specified one is not `us-east-1`. The us-east-1
# server allows a user to use buckets from any region.
bucket = self.s3.get_bucket(bucket_name, validate=False)
raise BucketLocationConflictException(self.__getBucketRegion(bucket)) # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['e']]
else:
if self.__getBucketRegion(bucket) != self.region:
raise BucketLocationConflictException(self.__getBucketRegion(bucket)) # depends on [control=['if'], data=[]]
if versioning and (not bucketExisted):
# only call this method on bucket creation
bucket.configure_versioning(True) # depends on [control=['if'], data=[]]
else:
# now test for versioning consistency
# we should never see any of these errors since 'versioning' should always be true
bucket_versioning = self.__getBucketVersioning(bucket)
if bucket_versioning != versioning:
assert False, 'Cannot modify versioning on existing bucket' # depends on [control=['if'], data=[]]
elif bucket_versioning is None:
assert False, 'Cannot use a bucket with versioning suspended' # depends on [control=['if'], data=[]]
if bucketExisted:
log.debug("Using pre-existing job store bucket '%s'.", bucket_name) # depends on [control=['if'], data=[]]
else:
log.debug("Created new job store bucket '%s'.", bucket_name)
return bucket # depends on [control=['with'], data=[]] # depends on [control=['for'], data=['attempt']] |
def _load_methods(package):
    """Populate the module-level ``_methods`` map for *package*.

    Reads the ``analysis.methods`` section of the package's acorn settings
    and resolves each configured fully-qualified target name to a callable.

    Args:
        package (str): name of the package to load for.
    """
    global _methods
    _methods[package] = None
    from acorn.config import settings
    from acorn.logging.descriptors import _obj_getattr
    spack = settings(package)
    # Nothing to resolve unless the package ships an analysis.methods section.
    if spack is None or not spack.has_section("analysis.methods"):
        return
    from importlib import import_module
    _methods[package] = {}
    for fqdn, target in dict(spack.items("analysis.methods")).items():
        # Import the root module of the target, then walk down to the callable.
        root = import_module(target.split('.')[0])
        _methods[package][fqdn] = _obj_getattr(root, target)
constant[Loads the mappings from method call result to analysis.
Args:
package (str): name of the package to load for.
]
<ast.Global object at 0x7da1b14d0790>
call[name[_methods]][name[package]] assign[=] constant[None]
from relative_module[acorn.config] import module[settings]
from relative_module[acorn.logging.descriptors] import module[_obj_getattr]
variable[spack] assign[=] call[name[settings], parameter[name[package]]]
if compare[name[spack] is_not constant[None]] begin[:]
if call[name[spack].has_section, parameter[constant[analysis.methods]]] begin[:]
call[name[_methods]][name[package]] assign[=] dictionary[[], []]
from relative_module[importlib] import module[import_module]
variable[mappings] assign[=] call[name[dict], parameter[call[name[spack].items, parameter[constant[analysis.methods]]]]]
for taget[tuple[[<ast.Name object at 0x7da18f7237f0>, <ast.Name object at 0x7da18f722a10>]]] in starred[call[name[mappings].items, parameter[]]] begin[:]
variable[rootname] assign[=] call[call[name[target].split, parameter[constant[.]]]][constant[0]]
variable[root] assign[=] call[name[import_module], parameter[name[rootname]]]
variable[caller] assign[=] call[name[_obj_getattr], parameter[name[root], name[target]]]
call[call[name[_methods]][name[package]]][name[fqdn]] assign[=] name[caller] | keyword[def] identifier[_load_methods] ( identifier[package] ):
literal[string]
keyword[global] identifier[_methods]
identifier[_methods] [ identifier[package] ]= keyword[None]
keyword[from] identifier[acorn] . identifier[config] keyword[import] identifier[settings]
keyword[from] identifier[acorn] . identifier[logging] . identifier[descriptors] keyword[import] identifier[_obj_getattr]
identifier[spack] = identifier[settings] ( identifier[package] )
keyword[if] identifier[spack] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[spack] . identifier[has_section] ( literal[string] ):
identifier[_methods] [ identifier[package] ]={}
keyword[from] identifier[importlib] keyword[import] identifier[import_module]
identifier[mappings] = identifier[dict] ( identifier[spack] . identifier[items] ( literal[string] ))
keyword[for] identifier[fqdn] , identifier[target] keyword[in] identifier[mappings] . identifier[items] ():
identifier[rootname] = identifier[target] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[root] = identifier[import_module] ( identifier[rootname] )
identifier[caller] = identifier[_obj_getattr] ( identifier[root] , identifier[target] )
identifier[_methods] [ identifier[package] ][ identifier[fqdn] ]= identifier[caller] | def _load_methods(package):
"""Loads the mappings from method call result to analysis.
Args:
package (str): name of the package to load for.
"""
global _methods
_methods[package] = None
from acorn.config import settings
from acorn.logging.descriptors import _obj_getattr
spack = settings(package)
if spack is not None:
if spack.has_section('analysis.methods'):
_methods[package] = {}
from importlib import import_module
mappings = dict(spack.items('analysis.methods'))
for (fqdn, target) in mappings.items():
rootname = target.split('.')[0]
root = import_module(rootname)
caller = _obj_getattr(root, target)
_methods[package][fqdn] = caller # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['spack']] |
def merge_overlapping_in_list(l):
    """Sort *l* in place, then collapse neighbouring intervals whose
    ``union`` is defined — i.e. overlapping as well as adjacent ones,
    so e.g. [0,1] and [1,2] merge into [0,2]."""
    l.sort()
    idx = 0
    while idx + 1 < len(l):
        combined = l[idx].union(l[idx + 1])
        if combined is None:
            # Disjoint neighbours: advance to the next pair.
            idx += 1
        else:
            # Replace the pair with its union and drop the right member.
            l[idx] = combined
            del l[idx + 1]
constant[Sorts list, merges any overlapping intervals, and also adjacent intervals. e.g.
[0,1], [1,2] would be merge to [0,.2].]
variable[i] assign[=] constant[0]
call[name[l].sort, parameter[]]
while compare[name[i] less[<] binary_operation[call[name[len], parameter[name[l]]] - constant[1]]] begin[:]
variable[u] assign[=] call[call[name[l]][name[i]].union, parameter[call[name[l]][binary_operation[name[i] + constant[1]]]]]
if compare[name[u] is_not constant[None]] begin[:]
call[name[l]][name[i]] assign[=] name[u]
call[name[l].pop, parameter[binary_operation[name[i] + constant[1]]]] | keyword[def] identifier[merge_overlapping_in_list] ( identifier[l] ):
literal[string]
identifier[i] = literal[int]
identifier[l] . identifier[sort] ()
keyword[while] identifier[i] < identifier[len] ( identifier[l] )- literal[int] :
identifier[u] = identifier[l] [ identifier[i] ]. identifier[union] ( identifier[l] [ identifier[i] + literal[int] ])
keyword[if] identifier[u] keyword[is] keyword[not] keyword[None] :
identifier[l] [ identifier[i] ]= identifier[u]
identifier[l] . identifier[pop] ( identifier[i] + literal[int] )
keyword[else] :
identifier[i] += literal[int] | def merge_overlapping_in_list(l):
"""Sorts list, merges any overlapping intervals, and also adjacent intervals. e.g.
[0,1], [1,2] would be merge to [0,.2]."""
i = 0
l.sort()
while i < len(l) - 1:
u = l[i].union(l[i + 1])
if u is not None:
l[i] = u
l.pop(i + 1) # depends on [control=['if'], data=['u']]
else:
i += 1 # depends on [control=['while'], data=['i']] |
def scrollContentsBy(self, dx, dy):
"""Override from QAbstractScrollArea.
Called when the scroll bars are adjusted by the user.
"""
if self._adjusting:
return
self._scrolling = True
try:
bd = self.viewer.get_bindings()
res = bd.calc_pan_pct(self.viewer, pad=self.pad)
if res is None:
return
pct_x, pct_y = res.pan_pct_x, res.pan_pct_y
# Only adjust pan setting for axes that have changed
if dx != 0:
hsb = self.horizontalScrollBar()
pos_x = float(hsb.value())
pct_x = pos_x / float(self.upper_h)
if dy != 0:
vsb = self.verticalScrollBar()
pos_y = float(vsb.value())
# invert Y pct because of orientation of scrollbar
pct_y = 1.0 - (pos_y / float(self.upper_v))
bd = self.viewer.get_bindings()
bd.pan_by_pct(self.viewer, pct_x, pct_y, pad=self.pad)
# This shouldn't be necessary, but seems to be
self.viewer.redraw(whence=0)
finally:
self._scrolling = False | def function[scrollContentsBy, parameter[self, dx, dy]]:
constant[Override from QAbstractScrollArea.
Called when the scroll bars are adjusted by the user.
]
if name[self]._adjusting begin[:]
return[None]
name[self]._scrolling assign[=] constant[True]
<ast.Try object at 0x7da2041d8250> | keyword[def] identifier[scrollContentsBy] ( identifier[self] , identifier[dx] , identifier[dy] ):
literal[string]
keyword[if] identifier[self] . identifier[_adjusting] :
keyword[return]
identifier[self] . identifier[_scrolling] = keyword[True]
keyword[try] :
identifier[bd] = identifier[self] . identifier[viewer] . identifier[get_bindings] ()
identifier[res] = identifier[bd] . identifier[calc_pan_pct] ( identifier[self] . identifier[viewer] , identifier[pad] = identifier[self] . identifier[pad] )
keyword[if] identifier[res] keyword[is] keyword[None] :
keyword[return]
identifier[pct_x] , identifier[pct_y] = identifier[res] . identifier[pan_pct_x] , identifier[res] . identifier[pan_pct_y]
keyword[if] identifier[dx] != literal[int] :
identifier[hsb] = identifier[self] . identifier[horizontalScrollBar] ()
identifier[pos_x] = identifier[float] ( identifier[hsb] . identifier[value] ())
identifier[pct_x] = identifier[pos_x] / identifier[float] ( identifier[self] . identifier[upper_h] )
keyword[if] identifier[dy] != literal[int] :
identifier[vsb] = identifier[self] . identifier[verticalScrollBar] ()
identifier[pos_y] = identifier[float] ( identifier[vsb] . identifier[value] ())
identifier[pct_y] = literal[int] -( identifier[pos_y] / identifier[float] ( identifier[self] . identifier[upper_v] ))
identifier[bd] = identifier[self] . identifier[viewer] . identifier[get_bindings] ()
identifier[bd] . identifier[pan_by_pct] ( identifier[self] . identifier[viewer] , identifier[pct_x] , identifier[pct_y] , identifier[pad] = identifier[self] . identifier[pad] )
identifier[self] . identifier[viewer] . identifier[redraw] ( identifier[whence] = literal[int] )
keyword[finally] :
identifier[self] . identifier[_scrolling] = keyword[False] | def scrollContentsBy(self, dx, dy):
"""Override from QAbstractScrollArea.
Called when the scroll bars are adjusted by the user.
"""
if self._adjusting:
return # depends on [control=['if'], data=[]]
self._scrolling = True
try:
bd = self.viewer.get_bindings()
res = bd.calc_pan_pct(self.viewer, pad=self.pad)
if res is None:
return # depends on [control=['if'], data=[]]
(pct_x, pct_y) = (res.pan_pct_x, res.pan_pct_y)
# Only adjust pan setting for axes that have changed
if dx != 0:
hsb = self.horizontalScrollBar()
pos_x = float(hsb.value())
pct_x = pos_x / float(self.upper_h) # depends on [control=['if'], data=[]]
if dy != 0:
vsb = self.verticalScrollBar()
pos_y = float(vsb.value())
# invert Y pct because of orientation of scrollbar
pct_y = 1.0 - pos_y / float(self.upper_v) # depends on [control=['if'], data=[]]
bd = self.viewer.get_bindings()
bd.pan_by_pct(self.viewer, pct_x, pct_y, pad=self.pad)
# This shouldn't be necessary, but seems to be
self.viewer.redraw(whence=0) # depends on [control=['try'], data=[]]
finally:
self._scrolling = False |
def convert_example_to_features(example, max_seq_length, tokenizer):
    """
    Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
    IDs, LM labels, input_mask, CLS and SEP tokens etc.
    :param example: InputExample, containing sentence input as strings and is_next label
    :param max_seq_length: int, maximum length of sequence.
    :param tokenizer: Tokenizer
    :return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)
    """
    tokens_a = example.tokens_a
    tokens_b = example.tokens_b
    # Truncate both token lists in place so that, together with the three
    # special tokens [CLS], [SEP], [SEP], the total fits in max_seq_length.
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
    tokens_a, t1_label = random_word(tokens_a, tokenizer)
    tokens_b, t2_label = random_word(tokens_b, tokenizer)
    # LM labels for the special-token positions are -1 (ignored by the loss).
    lm_label_ids = [-1] + t1_label + [-1] + t2_label + [-1]
    assert len(tokens_b) > 0
    # BERT sequence-pair layout:
    #   tokens:      [CLS] a1 ... an [SEP] b1 ... bm [SEP]
    #   segment ids:   0   0  ...  0   0   1  ...  1   1
    # The segment (type) ids distinguish the first sentence from the second;
    # the [CLS] vector is the one used as the "sentence vector" downstream.
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
    segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    # 1 marks a real token, 0 marks padding; only real tokens are attended to.
    input_mask = [1] * len(input_ids)
    # Zero-pad every per-position list out to the fixed sequence length.
    pad_len = max_seq_length - len(input_ids)
    input_ids.extend([0] * pad_len)
    input_mask.extend([0] * pad_len)
    segment_ids.extend([0] * pad_len)
    lm_label_ids.extend([-1] * pad_len)
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    assert len(lm_label_ids) == max_seq_length
    # Log the first few examples for eyeball verification.
    if example.guid < 5:
        logger.info("*** Example ***")
        logger.info("guid: %s" % (example.guid))
        logger.info("tokens: %s" % " ".join(
            [str(x) for x in tokens]))
        logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        logger.info(
            "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        logger.info("LM label: %s " % (lm_label_ids))
        logger.info("Is next sentence label: %s " % (example.is_next))
    features = InputFeatures(input_ids=input_ids,
                             input_mask=input_mask,
                             segment_ids=segment_ids,
                             lm_label_ids=lm_label_ids,
                             is_next=example.is_next)
    return features
constant[
Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
IDs, LM labels, input_mask, CLS and SEP tokens etc.
:param example: InputExample, containing sentence input as strings and is_next label
:param max_seq_length: int, maximum length of sequence.
:param tokenizer: Tokenizer
:return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)
]
variable[tokens_a] assign[=] name[example].tokens_a
variable[tokens_b] assign[=] name[example].tokens_b
call[name[_truncate_seq_pair], parameter[name[tokens_a], name[tokens_b], binary_operation[name[max_seq_length] - constant[3]]]]
<ast.Tuple object at 0x7da20e9546a0> assign[=] call[name[random_word], parameter[name[tokens_a], name[tokenizer]]]
<ast.Tuple object at 0x7da20e9563e0> assign[=] call[name[random_word], parameter[name[tokens_b], name[tokenizer]]]
variable[lm_label_ids] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[list[[<ast.UnaryOp object at 0x7da20e955120>]] + name[t1_label]] + list[[<ast.UnaryOp object at 0x7da20e957ac0>]]] + name[t2_label]] + list[[<ast.UnaryOp object at 0x7da20e954340>]]]
variable[tokens] assign[=] list[[]]
variable[segment_ids] assign[=] list[[]]
call[name[tokens].append, parameter[constant[[CLS]]]]
call[name[segment_ids].append, parameter[constant[0]]]
for taget[name[token]] in starred[name[tokens_a]] begin[:]
call[name[tokens].append, parameter[name[token]]]
call[name[segment_ids].append, parameter[constant[0]]]
call[name[tokens].append, parameter[constant[[SEP]]]]
call[name[segment_ids].append, parameter[constant[0]]]
assert[compare[call[name[len], parameter[name[tokens_b]]] greater[>] constant[0]]]
for taget[name[token]] in starred[name[tokens_b]] begin[:]
call[name[tokens].append, parameter[name[token]]]
call[name[segment_ids].append, parameter[constant[1]]]
call[name[tokens].append, parameter[constant[[SEP]]]]
call[name[segment_ids].append, parameter[constant[1]]]
variable[input_ids] assign[=] call[name[tokenizer].convert_tokens_to_ids, parameter[name[tokens]]]
variable[input_mask] assign[=] binary_operation[list[[<ast.Constant object at 0x7da207f9ae60>]] * call[name[len], parameter[name[input_ids]]]]
while compare[call[name[len], parameter[name[input_ids]]] less[<] name[max_seq_length]] begin[:]
call[name[input_ids].append, parameter[constant[0]]]
call[name[input_mask].append, parameter[constant[0]]]
call[name[segment_ids].append, parameter[constant[0]]]
call[name[lm_label_ids].append, parameter[<ast.UnaryOp object at 0x7da1b20b44f0>]]
assert[compare[call[name[len], parameter[name[input_ids]]] equal[==] name[max_seq_length]]]
assert[compare[call[name[len], parameter[name[input_mask]]] equal[==] name[max_seq_length]]]
assert[compare[call[name[len], parameter[name[segment_ids]]] equal[==] name[max_seq_length]]]
assert[compare[call[name[len], parameter[name[lm_label_ids]]] equal[==] name[max_seq_length]]]
if compare[name[example].guid less[<] constant[5]] begin[:]
call[name[logger].info, parameter[constant[*** Example ***]]]
call[name[logger].info, parameter[binary_operation[constant[guid: %s] <ast.Mod object at 0x7da2590d6920> name[example].guid]]]
call[name[logger].info, parameter[binary_operation[constant[tokens: %s] <ast.Mod object at 0x7da2590d6920> call[constant[ ].join, parameter[<ast.ListComp object at 0x7da2047e9ff0>]]]]]
call[name[logger].info, parameter[binary_operation[constant[input_ids: %s] <ast.Mod object at 0x7da2590d6920> call[constant[ ].join, parameter[<ast.ListComp object at 0x7da2047eb8e0>]]]]]
call[name[logger].info, parameter[binary_operation[constant[input_mask: %s] <ast.Mod object at 0x7da2590d6920> call[constant[ ].join, parameter[<ast.ListComp object at 0x7da2047ead70>]]]]]
call[name[logger].info, parameter[binary_operation[constant[segment_ids: %s] <ast.Mod object at 0x7da2590d6920> call[constant[ ].join, parameter[<ast.ListComp object at 0x7da2047e8c40>]]]]]
call[name[logger].info, parameter[binary_operation[constant[LM label: %s ] <ast.Mod object at 0x7da2590d6920> name[lm_label_ids]]]]
call[name[logger].info, parameter[binary_operation[constant[Is next sentence label: %s ] <ast.Mod object at 0x7da2590d6920> name[example].is_next]]]
variable[features] assign[=] call[name[InputFeatures], parameter[]]
return[name[features]] | keyword[def] identifier[convert_example_to_features] ( identifier[example] , identifier[max_seq_length] , identifier[tokenizer] ):
literal[string]
identifier[tokens_a] = identifier[example] . identifier[tokens_a]
identifier[tokens_b] = identifier[example] . identifier[tokens_b]
identifier[_truncate_seq_pair] ( identifier[tokens_a] , identifier[tokens_b] , identifier[max_seq_length] - literal[int] )
identifier[tokens_a] , identifier[t1_label] = identifier[random_word] ( identifier[tokens_a] , identifier[tokenizer] )
identifier[tokens_b] , identifier[t2_label] = identifier[random_word] ( identifier[tokens_b] , identifier[tokenizer] )
identifier[lm_label_ids] =([- literal[int] ]+ identifier[t1_label] +[- literal[int] ]+ identifier[t2_label] +[- literal[int] ])
identifier[tokens] =[]
identifier[segment_ids] =[]
identifier[tokens] . identifier[append] ( literal[string] )
identifier[segment_ids] . identifier[append] ( literal[int] )
keyword[for] identifier[token] keyword[in] identifier[tokens_a] :
identifier[tokens] . identifier[append] ( identifier[token] )
identifier[segment_ids] . identifier[append] ( literal[int] )
identifier[tokens] . identifier[append] ( literal[string] )
identifier[segment_ids] . identifier[append] ( literal[int] )
keyword[assert] identifier[len] ( identifier[tokens_b] )> literal[int]
keyword[for] identifier[token] keyword[in] identifier[tokens_b] :
identifier[tokens] . identifier[append] ( identifier[token] )
identifier[segment_ids] . identifier[append] ( literal[int] )
identifier[tokens] . identifier[append] ( literal[string] )
identifier[segment_ids] . identifier[append] ( literal[int] )
identifier[input_ids] = identifier[tokenizer] . identifier[convert_tokens_to_ids] ( identifier[tokens] )
identifier[input_mask] =[ literal[int] ]* identifier[len] ( identifier[input_ids] )
keyword[while] identifier[len] ( identifier[input_ids] )< identifier[max_seq_length] :
identifier[input_ids] . identifier[append] ( literal[int] )
identifier[input_mask] . identifier[append] ( literal[int] )
identifier[segment_ids] . identifier[append] ( literal[int] )
identifier[lm_label_ids] . identifier[append] (- literal[int] )
keyword[assert] identifier[len] ( identifier[input_ids] )== identifier[max_seq_length]
keyword[assert] identifier[len] ( identifier[input_mask] )== identifier[max_seq_length]
keyword[assert] identifier[len] ( identifier[segment_ids] )== identifier[max_seq_length]
keyword[assert] identifier[len] ( identifier[lm_label_ids] )== identifier[max_seq_length]
keyword[if] identifier[example] . identifier[guid] < literal[int] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] %( identifier[example] . identifier[guid] ))
identifier[logger] . identifier[info] ( literal[string] % literal[string] . identifier[join] (
[ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[tokens] ]))
identifier[logger] . identifier[info] ( literal[string] % literal[string] . identifier[join] ([ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[input_ids] ]))
identifier[logger] . identifier[info] ( literal[string] % literal[string] . identifier[join] ([ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[input_mask] ]))
identifier[logger] . identifier[info] (
literal[string] % literal[string] . identifier[join] ([ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[segment_ids] ]))
identifier[logger] . identifier[info] ( literal[string] %( identifier[lm_label_ids] ))
identifier[logger] . identifier[info] ( literal[string] %( identifier[example] . identifier[is_next] ))
identifier[features] = identifier[InputFeatures] ( identifier[input_ids] = identifier[input_ids] ,
identifier[input_mask] = identifier[input_mask] ,
identifier[segment_ids] = identifier[segment_ids] ,
identifier[lm_label_ids] = identifier[lm_label_ids] ,
identifier[is_next] = identifier[example] . identifier[is_next] )
keyword[return] identifier[features] | def convert_example_to_features(example, max_seq_length, tokenizer):
"""
Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
IDs, LM labels, input_mask, CLS and SEP tokens etc.
:param example: InputExample, containing sentence input as strings and is_next label
:param max_seq_length: int, maximum length of sequence.
:param tokenizer: Tokenizer
:return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)
"""
tokens_a = example.tokens_a
tokens_b = example.tokens_b
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
(tokens_a, t1_label) = random_word(tokens_a, tokenizer)
(tokens_b, t2_label) = random_word(tokens_b, tokenizer)
# concatenate lm labels and account for CLS, SEP, SEP
lm_label_ids = [-1] + t1_label + [-1] + t2_label + [-1]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambigiously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append('[CLS]')
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0) # depends on [control=['for'], data=['token']]
tokens.append('[SEP]')
segment_ids.append(0)
assert len(tokens_b) > 0
for token in tokens_b:
tokens.append(token)
segment_ids.append(1) # depends on [control=['for'], data=['token']]
tokens.append('[SEP]')
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
lm_label_ids.append(-1) # depends on [control=['while'], data=[]]
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(lm_label_ids) == max_seq_length
if example.guid < 5:
logger.info('*** Example ***')
logger.info('guid: %s' % example.guid)
logger.info('tokens: %s' % ' '.join([str(x) for x in tokens]))
logger.info('input_ids: %s' % ' '.join([str(x) for x in input_ids]))
logger.info('input_mask: %s' % ' '.join([str(x) for x in input_mask]))
logger.info('segment_ids: %s' % ' '.join([str(x) for x in segment_ids]))
logger.info('LM label: %s ' % lm_label_ids)
logger.info('Is next sentence label: %s ' % example.is_next) # depends on [control=['if'], data=[]]
features = InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, lm_label_ids=lm_label_ids, is_next=example.is_next)
return features |
def compare_two(data1, data2, test=StatTests.ks):
'''Compares two distributions of data
and assess two scores: a distance between them
and a probability they are drawn from the same
distribution.
Parameters:
data1: numpy array of dataset 1
data2: numpy array of dataset 2
test: Stat_tests\
Defines the statistical test to be used, based\
on the scipy available modules.\
Accepted tests: ks_2samp, wilcoxon, ttest
Returns:
dist: float\
High numbers define high dissimilarity between the two datasets
p-value: float\
Small numbers define high probability the data come from\
same dataset.
'''
results = getattr(_st, get_test(test))(data1, data2)
Stats = namedtuple('Stats', ['dist', 'pvalue'])
return Stats(*results) | def function[compare_two, parameter[data1, data2, test]]:
constant[Compares two distributions of data
and assess two scores: a distance between them
and a probability they are drawn from the same
distribution.
Parameters:
data1: numpy array of dataset 1
data2: numpy array of dataset 2
test: Stat_tests Defines the statistical test to be used, based on the scipy available modules. Accepted tests: ks_2samp, wilcoxon, ttest
Returns:
dist: float High numbers define high dissimilarity between the two datasets
p-value: float Small numbers define high probability the data come from same dataset.
]
variable[results] assign[=] call[call[name[getattr], parameter[name[_st], call[name[get_test], parameter[name[test]]]]], parameter[name[data1], name[data2]]]
variable[Stats] assign[=] call[name[namedtuple], parameter[constant[Stats], list[[<ast.Constant object at 0x7da20e9633a0>, <ast.Constant object at 0x7da20e963280>]]]]
return[call[name[Stats], parameter[<ast.Starred object at 0x7da20e962e30>]]] | keyword[def] identifier[compare_two] ( identifier[data1] , identifier[data2] , identifier[test] = identifier[StatTests] . identifier[ks] ):
literal[string]
identifier[results] = identifier[getattr] ( identifier[_st] , identifier[get_test] ( identifier[test] ))( identifier[data1] , identifier[data2] )
identifier[Stats] = identifier[namedtuple] ( literal[string] ,[ literal[string] , literal[string] ])
keyword[return] identifier[Stats] (* identifier[results] ) | def compare_two(data1, data2, test=StatTests.ks):
"""Compares two distributions of data
and assess two scores: a distance between them
and a probability they are drawn from the same
distribution.
Parameters:
data1: numpy array of dataset 1
data2: numpy array of dataset 2
test: Stat_tests Defines the statistical test to be used, based on the scipy available modules. Accepted tests: ks_2samp, wilcoxon, ttest
Returns:
dist: float High numbers define high dissimilarity between the two datasets
p-value: float Small numbers define high probability the data come from same dataset.
"""
results = getattr(_st, get_test(test))(data1, data2)
Stats = namedtuple('Stats', ['dist', 'pvalue'])
return Stats(*results) |
def get_name(self, name_case=DdlParseBase.NAME_CASE.original):
"""
Get Name converted case
:param name_case: name case type
* DdlParse.NAME_CASE.original : Return to no convert
* DdlParse.NAME_CASE.lower : Return to lower
* DdlParse.NAME_CASE.upper : Return to upper
:return: name
"""
if name_case == self.NAME_CASE.lower:
return self._name.lower()
elif name_case == self.NAME_CASE.upper:
return self._name.upper()
else:
return self._name | def function[get_name, parameter[self, name_case]]:
constant[
Get Name converted case
:param name_case: name case type
* DdlParse.NAME_CASE.original : Return to no convert
* DdlParse.NAME_CASE.lower : Return to lower
* DdlParse.NAME_CASE.upper : Return to upper
:return: name
]
if compare[name[name_case] equal[==] name[self].NAME_CASE.lower] begin[:]
return[call[name[self]._name.lower, parameter[]]] | keyword[def] identifier[get_name] ( identifier[self] , identifier[name_case] = identifier[DdlParseBase] . identifier[NAME_CASE] . identifier[original] ):
literal[string]
keyword[if] identifier[name_case] == identifier[self] . identifier[NAME_CASE] . identifier[lower] :
keyword[return] identifier[self] . identifier[_name] . identifier[lower] ()
keyword[elif] identifier[name_case] == identifier[self] . identifier[NAME_CASE] . identifier[upper] :
keyword[return] identifier[self] . identifier[_name] . identifier[upper] ()
keyword[else] :
keyword[return] identifier[self] . identifier[_name] | def get_name(self, name_case=DdlParseBase.NAME_CASE.original):
"""
Get Name converted case
:param name_case: name case type
* DdlParse.NAME_CASE.original : Return to no convert
* DdlParse.NAME_CASE.lower : Return to lower
* DdlParse.NAME_CASE.upper : Return to upper
:return: name
"""
if name_case == self.NAME_CASE.lower:
return self._name.lower() # depends on [control=['if'], data=[]]
elif name_case == self.NAME_CASE.upper:
return self._name.upper() # depends on [control=['if'], data=[]]
else:
return self._name |
def abort(cls, mapreduce_id, **kwargs):
"""Causes a job to abort.
Args:
mapreduce_id: The job to abort. Not verified as a valid job.
"""
cls(key_name="%s:%s" % (mapreduce_id, cls._KEY_NAME),
command=cls.ABORT).put(**kwargs) | def function[abort, parameter[cls, mapreduce_id]]:
constant[Causes a job to abort.
Args:
mapreduce_id: The job to abort. Not verified as a valid job.
]
call[call[name[cls], parameter[]].put, parameter[]] | keyword[def] identifier[abort] ( identifier[cls] , identifier[mapreduce_id] ,** identifier[kwargs] ):
literal[string]
identifier[cls] ( identifier[key_name] = literal[string] %( identifier[mapreduce_id] , identifier[cls] . identifier[_KEY_NAME] ),
identifier[command] = identifier[cls] . identifier[ABORT] ). identifier[put] (** identifier[kwargs] ) | def abort(cls, mapreduce_id, **kwargs):
"""Causes a job to abort.
Args:
mapreduce_id: The job to abort. Not verified as a valid job.
"""
cls(key_name='%s:%s' % (mapreduce_id, cls._KEY_NAME), command=cls.ABORT).put(**kwargs) |
def validate_minmax_axis(axis):
"""
Ensure that the axis argument passed to min, max, argmin, or argmax is
zero or None, as otherwise it will be incorrectly ignored.
Parameters
----------
axis : int or None
Raises
------
ValueError
"""
ndim = 1 # hard-coded for Index
if axis is None:
return
if axis >= ndim or (axis < 0 and ndim + axis < 0):
raise ValueError("`axis` must be fewer than the number of "
"dimensions ({ndim})".format(ndim=ndim)) | def function[validate_minmax_axis, parameter[axis]]:
constant[
Ensure that the axis argument passed to min, max, argmin, or argmax is
zero or None, as otherwise it will be incorrectly ignored.
Parameters
----------
axis : int or None
Raises
------
ValueError
]
variable[ndim] assign[=] constant[1]
if compare[name[axis] is constant[None]] begin[:]
return[None]
if <ast.BoolOp object at 0x7da20cabe410> begin[:]
<ast.Raise object at 0x7da20cabf160> | keyword[def] identifier[validate_minmax_axis] ( identifier[axis] ):
literal[string]
identifier[ndim] = literal[int]
keyword[if] identifier[axis] keyword[is] keyword[None] :
keyword[return]
keyword[if] identifier[axis] >= identifier[ndim] keyword[or] ( identifier[axis] < literal[int] keyword[and] identifier[ndim] + identifier[axis] < literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[ndim] = identifier[ndim] )) | def validate_minmax_axis(axis):
"""
Ensure that the axis argument passed to min, max, argmin, or argmax is
zero or None, as otherwise it will be incorrectly ignored.
Parameters
----------
axis : int or None
Raises
------
ValueError
"""
ndim = 1 # hard-coded for Index
if axis is None:
return # depends on [control=['if'], data=[]]
if axis >= ndim or (axis < 0 and ndim + axis < 0):
raise ValueError('`axis` must be fewer than the number of dimensions ({ndim})'.format(ndim=ndim)) # depends on [control=['if'], data=[]] |
def hazard_extra_keyword(keyword, feature, parent):
"""Given a keyword, it will return the value of the keyword
from the hazard layer's extra keywords.
For instance:
* hazard_extra_keyword( 'depth' ) -> will return the value of 'depth'
in current hazard layer's extra keywords.
"""
_ = feature, parent # NOQA
hazard_layer_path = QgsExpressionContextUtils. \
projectScope(QgsProject.instance()).variable(
'hazard_layer')
hazard_layer = load_layer(hazard_layer_path)[0]
keywords = KeywordIO.read_keywords(hazard_layer)
extra_keywords = keywords.get('extra_keywords')
if extra_keywords:
value = extra_keywords.get(keyword)
if value:
value_definition = definition(value)
if value_definition:
return value_definition['name']
return value
else:
return tr('Keyword %s is not found' % keyword)
return tr('No extra keywords found') | def function[hazard_extra_keyword, parameter[keyword, feature, parent]]:
constant[Given a keyword, it will return the value of the keyword
from the hazard layer's extra keywords.
For instance:
* hazard_extra_keyword( 'depth' ) -> will return the value of 'depth'
in current hazard layer's extra keywords.
]
variable[_] assign[=] tuple[[<ast.Name object at 0x7da1b0c0fac0>, <ast.Name object at 0x7da1b0c0e3b0>]]
variable[hazard_layer_path] assign[=] call[call[name[QgsExpressionContextUtils].projectScope, parameter[call[name[QgsProject].instance, parameter[]]]].variable, parameter[constant[hazard_layer]]]
variable[hazard_layer] assign[=] call[call[name[load_layer], parameter[name[hazard_layer_path]]]][constant[0]]
variable[keywords] assign[=] call[name[KeywordIO].read_keywords, parameter[name[hazard_layer]]]
variable[extra_keywords] assign[=] call[name[keywords].get, parameter[constant[extra_keywords]]]
if name[extra_keywords] begin[:]
variable[value] assign[=] call[name[extra_keywords].get, parameter[name[keyword]]]
if name[value] begin[:]
variable[value_definition] assign[=] call[name[definition], parameter[name[value]]]
if name[value_definition] begin[:]
return[call[name[value_definition]][constant[name]]]
return[name[value]]
return[call[name[tr], parameter[constant[No extra keywords found]]]] | keyword[def] identifier[hazard_extra_keyword] ( identifier[keyword] , identifier[feature] , identifier[parent] ):
literal[string]
identifier[_] = identifier[feature] , identifier[parent]
identifier[hazard_layer_path] = identifier[QgsExpressionContextUtils] . identifier[projectScope] ( identifier[QgsProject] . identifier[instance] ()). identifier[variable] (
literal[string] )
identifier[hazard_layer] = identifier[load_layer] ( identifier[hazard_layer_path] )[ literal[int] ]
identifier[keywords] = identifier[KeywordIO] . identifier[read_keywords] ( identifier[hazard_layer] )
identifier[extra_keywords] = identifier[keywords] . identifier[get] ( literal[string] )
keyword[if] identifier[extra_keywords] :
identifier[value] = identifier[extra_keywords] . identifier[get] ( identifier[keyword] )
keyword[if] identifier[value] :
identifier[value_definition] = identifier[definition] ( identifier[value] )
keyword[if] identifier[value_definition] :
keyword[return] identifier[value_definition] [ literal[string] ]
keyword[return] identifier[value]
keyword[else] :
keyword[return] identifier[tr] ( literal[string] % identifier[keyword] )
keyword[return] identifier[tr] ( literal[string] ) | def hazard_extra_keyword(keyword, feature, parent):
"""Given a keyword, it will return the value of the keyword
from the hazard layer's extra keywords.
For instance:
* hazard_extra_keyword( 'depth' ) -> will return the value of 'depth'
in current hazard layer's extra keywords.
"""
_ = (feature, parent) # NOQA
hazard_layer_path = QgsExpressionContextUtils.projectScope(QgsProject.instance()).variable('hazard_layer')
hazard_layer = load_layer(hazard_layer_path)[0]
keywords = KeywordIO.read_keywords(hazard_layer)
extra_keywords = keywords.get('extra_keywords')
if extra_keywords:
value = extra_keywords.get(keyword)
if value:
value_definition = definition(value)
if value_definition:
return value_definition['name'] # depends on [control=['if'], data=[]]
return value # depends on [control=['if'], data=[]]
else:
return tr('Keyword %s is not found' % keyword) # depends on [control=['if'], data=[]]
return tr('No extra keywords found') |
def _detect_iplus(self):
"""Check the DCNM version and determine if it's for iplus"""
ver_expr = "([0-9]+)\.([0-9]+)\((.*)\)"
re.compile(ver_expr)
v1 = re.match(ver_expr, self._cur_ver)
v2 = re.match(ver_expr, self._base_ver)
if int(v1.group(1)) > int(v2.group(1)):
self._is_iplus = True
elif int(v1.group(1)) == int(v2.group(1)):
if int(v1.group(2)) > int(v2.group(2)):
self._is_iplus = True
elif int(v1.group(2)) == int(v2.group(2)):
self._is_iplus = v1.group(3) >= v2.group(3)
LOG.info("DCNM version: %(cur_ver)s, iplus: %(is_iplus)s",
{'cur_ver': self._cur_ver, 'is_iplus': self._is_iplus}) | def function[_detect_iplus, parameter[self]]:
constant[Check the DCNM version and determine if it's for iplus]
variable[ver_expr] assign[=] constant[([0-9]+)\.([0-9]+)\((.*)\)]
call[name[re].compile, parameter[name[ver_expr]]]
variable[v1] assign[=] call[name[re].match, parameter[name[ver_expr], name[self]._cur_ver]]
variable[v2] assign[=] call[name[re].match, parameter[name[ver_expr], name[self]._base_ver]]
if compare[call[name[int], parameter[call[name[v1].group, parameter[constant[1]]]]] greater[>] call[name[int], parameter[call[name[v2].group, parameter[constant[1]]]]]] begin[:]
name[self]._is_iplus assign[=] constant[True]
call[name[LOG].info, parameter[constant[DCNM version: %(cur_ver)s, iplus: %(is_iplus)s], dictionary[[<ast.Constant object at 0x7da18dc99390>, <ast.Constant object at 0x7da18dc99120>], [<ast.Attribute object at 0x7da18dc99150>, <ast.Attribute object at 0x7da18dc99870>]]]] | keyword[def] identifier[_detect_iplus] ( identifier[self] ):
literal[string]
identifier[ver_expr] = literal[string]
identifier[re] . identifier[compile] ( identifier[ver_expr] )
identifier[v1] = identifier[re] . identifier[match] ( identifier[ver_expr] , identifier[self] . identifier[_cur_ver] )
identifier[v2] = identifier[re] . identifier[match] ( identifier[ver_expr] , identifier[self] . identifier[_base_ver] )
keyword[if] identifier[int] ( identifier[v1] . identifier[group] ( literal[int] ))> identifier[int] ( identifier[v2] . identifier[group] ( literal[int] )):
identifier[self] . identifier[_is_iplus] = keyword[True]
keyword[elif] identifier[int] ( identifier[v1] . identifier[group] ( literal[int] ))== identifier[int] ( identifier[v2] . identifier[group] ( literal[int] )):
keyword[if] identifier[int] ( identifier[v1] . identifier[group] ( literal[int] ))> identifier[int] ( identifier[v2] . identifier[group] ( literal[int] )):
identifier[self] . identifier[_is_iplus] = keyword[True]
keyword[elif] identifier[int] ( identifier[v1] . identifier[group] ( literal[int] ))== identifier[int] ( identifier[v2] . identifier[group] ( literal[int] )):
identifier[self] . identifier[_is_iplus] = identifier[v1] . identifier[group] ( literal[int] )>= identifier[v2] . identifier[group] ( literal[int] )
identifier[LOG] . identifier[info] ( literal[string] ,
{ literal[string] : identifier[self] . identifier[_cur_ver] , literal[string] : identifier[self] . identifier[_is_iplus] }) | def _detect_iplus(self):
"""Check the DCNM version and determine if it's for iplus"""
ver_expr = '([0-9]+)\\.([0-9]+)\\((.*)\\)'
re.compile(ver_expr)
v1 = re.match(ver_expr, self._cur_ver)
v2 = re.match(ver_expr, self._base_ver)
if int(v1.group(1)) > int(v2.group(1)):
self._is_iplus = True # depends on [control=['if'], data=[]]
elif int(v1.group(1)) == int(v2.group(1)):
if int(v1.group(2)) > int(v2.group(2)):
self._is_iplus = True # depends on [control=['if'], data=[]]
elif int(v1.group(2)) == int(v2.group(2)):
self._is_iplus = v1.group(3) >= v2.group(3) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
LOG.info('DCNM version: %(cur_ver)s, iplus: %(is_iplus)s', {'cur_ver': self._cur_ver, 'is_iplus': self._is_iplus}) |
def compile_kernel(self, openmp=False, assembly=False, verbose=False):
"""
Compile source (from as_code(type_)) to assembly or object and return (fileptr, filename).
Output can be used with Kernel.assemble()
"""
compiler, compiler_args = self._machine.get_compiler()
in_filename = self.get_kernel_code(openmp=openmp, as_filename=True)
if assembly:
compiler_args += ['-S']
suffix = '.s'
else:
suffix = '.o'
out_filename, already_exists = self._get_intermediate_file(
os.path.splitext(os.path.basename(in_filename))[0]+suffix, binary=not assembly, fp=False)
if already_exists:
if verbose:
print('Executing (compile_kernel): ', 'using cached', out_filename)
return out_filename
compiler_args += ['-std=c99']
cmd = ([compiler] +
[in_filename,
'-c',
'-I'+reduce_path(os.path.abspath(os.path.dirname(
os.path.realpath(__file__)))+'/headers/'),
'-o', out_filename] +
compiler_args)
if verbose:
print('Executing (compile_kernel): ', ' '.join(cmd))
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print("Compilation failed:", e, file=sys.stderr)
sys.exit(1)
# FIXME TODO FIXME TODO FIXME TODO
# Hacky workaround for icc issue (icc may issue vkmovb instructions with AVX512, which are
# invalid and should be kmovb):
if compiler == 'icc' and assembly:
with open(out_filename, 'r+') as f:
assembly = f.read()
f.seek(0)
f.write(assembly.replace('vkmovb', 'kmovb'))
f.truncate()
# FIXME TODO FIXME TODO FIXME TODO
# Let's return the out_file name
return out_filename | def function[compile_kernel, parameter[self, openmp, assembly, verbose]]:
constant[
Compile source (from as_code(type_)) to assembly or object and return (fileptr, filename).
Output can be used with Kernel.assemble()
]
<ast.Tuple object at 0x7da18bc70130> assign[=] call[name[self]._machine.get_compiler, parameter[]]
variable[in_filename] assign[=] call[name[self].get_kernel_code, parameter[]]
if name[assembly] begin[:]
<ast.AugAssign object at 0x7da18bc72530>
variable[suffix] assign[=] constant[.s]
<ast.Tuple object at 0x7da20c6c5ae0> assign[=] call[name[self]._get_intermediate_file, parameter[binary_operation[call[call[name[os].path.splitext, parameter[call[name[os].path.basename, parameter[name[in_filename]]]]]][constant[0]] + name[suffix]]]]
if name[already_exists] begin[:]
if name[verbose] begin[:]
call[name[print], parameter[constant[Executing (compile_kernel): ], constant[using cached], name[out_filename]]]
return[name[out_filename]]
<ast.AugAssign object at 0x7da20c6c4640>
variable[cmd] assign[=] binary_operation[binary_operation[list[[<ast.Name object at 0x7da20c6c4f40>]] + list[[<ast.Name object at 0x7da20c6c7460>, <ast.Constant object at 0x7da20c6c5ba0>, <ast.BinOp object at 0x7da20c6c5000>, <ast.Constant object at 0x7da20c6c7040>, <ast.Name object at 0x7da20c6c6320>]]] + name[compiler_args]]
if name[verbose] begin[:]
call[name[print], parameter[constant[Executing (compile_kernel): ], call[constant[ ].join, parameter[name[cmd]]]]]
<ast.Try object at 0x7da20c6c76a0>
if <ast.BoolOp object at 0x7da18bc72a40> begin[:]
with call[name[open], parameter[name[out_filename], constant[r+]]] begin[:]
variable[assembly] assign[=] call[name[f].read, parameter[]]
call[name[f].seek, parameter[constant[0]]]
call[name[f].write, parameter[call[name[assembly].replace, parameter[constant[vkmovb], constant[kmovb]]]]]
call[name[f].truncate, parameter[]]
return[name[out_filename]] | keyword[def] identifier[compile_kernel] ( identifier[self] , identifier[openmp] = keyword[False] , identifier[assembly] = keyword[False] , identifier[verbose] = keyword[False] ):
literal[string]
identifier[compiler] , identifier[compiler_args] = identifier[self] . identifier[_machine] . identifier[get_compiler] ()
identifier[in_filename] = identifier[self] . identifier[get_kernel_code] ( identifier[openmp] = identifier[openmp] , identifier[as_filename] = keyword[True] )
keyword[if] identifier[assembly] :
identifier[compiler_args] +=[ literal[string] ]
identifier[suffix] = literal[string]
keyword[else] :
identifier[suffix] = literal[string]
identifier[out_filename] , identifier[already_exists] = identifier[self] . identifier[_get_intermediate_file] (
identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[in_filename] ))[ literal[int] ]+ identifier[suffix] , identifier[binary] = keyword[not] identifier[assembly] , identifier[fp] = keyword[False] )
keyword[if] identifier[already_exists] :
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] , literal[string] , identifier[out_filename] )
keyword[return] identifier[out_filename]
identifier[compiler_args] +=[ literal[string] ]
identifier[cmd] =([ identifier[compiler] ]+
[ identifier[in_filename] ,
literal[string] ,
literal[string] + identifier[reduce_path] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[dirname] (
identifier[os] . identifier[path] . identifier[realpath] ( identifier[__file__] )))+ literal[string] ),
literal[string] , identifier[out_filename] ]+
identifier[compiler_args] )
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] , literal[string] . identifier[join] ( identifier[cmd] ))
keyword[try] :
identifier[subprocess] . identifier[check_output] ( identifier[cmd] )
keyword[except] identifier[subprocess] . identifier[CalledProcessError] keyword[as] identifier[e] :
identifier[print] ( literal[string] , identifier[e] , identifier[file] = identifier[sys] . identifier[stderr] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] identifier[compiler] == literal[string] keyword[and] identifier[assembly] :
keyword[with] identifier[open] ( identifier[out_filename] , literal[string] ) keyword[as] identifier[f] :
identifier[assembly] = identifier[f] . identifier[read] ()
identifier[f] . identifier[seek] ( literal[int] )
identifier[f] . identifier[write] ( identifier[assembly] . identifier[replace] ( literal[string] , literal[string] ))
identifier[f] . identifier[truncate] ()
keyword[return] identifier[out_filename] | def compile_kernel(self, openmp=False, assembly=False, verbose=False):
"""
Compile source (from as_code(type_)) to assembly or object and return (fileptr, filename).
Output can be used with Kernel.assemble()
"""
(compiler, compiler_args) = self._machine.get_compiler()
in_filename = self.get_kernel_code(openmp=openmp, as_filename=True)
if assembly:
compiler_args += ['-S']
suffix = '.s' # depends on [control=['if'], data=[]]
else:
suffix = '.o'
(out_filename, already_exists) = self._get_intermediate_file(os.path.splitext(os.path.basename(in_filename))[0] + suffix, binary=not assembly, fp=False)
if already_exists:
if verbose:
print('Executing (compile_kernel): ', 'using cached', out_filename) # depends on [control=['if'], data=[]]
return out_filename # depends on [control=['if'], data=[]]
compiler_args += ['-std=c99']
cmd = [compiler] + [in_filename, '-c', '-I' + reduce_path(os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/headers/'), '-o', out_filename] + compiler_args
if verbose:
print('Executing (compile_kernel): ', ' '.join(cmd)) # depends on [control=['if'], data=[]]
try:
subprocess.check_output(cmd) # depends on [control=['try'], data=[]]
except subprocess.CalledProcessError as e:
print('Compilation failed:', e, file=sys.stderr)
sys.exit(1) # depends on [control=['except'], data=['e']]
# FIXME TODO FIXME TODO FIXME TODO
# Hacky workaround for icc issue (icc may issue vkmovb instructions with AVX512, which are
# invalid and should be kmovb):
if compiler == 'icc' and assembly:
with open(out_filename, 'r+') as f:
assembly = f.read()
f.seek(0)
f.write(assembly.replace('vkmovb', 'kmovb'))
f.truncate() # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
# FIXME TODO FIXME TODO FIXME TODO
# Let's return the out_file name
return out_filename |
def get_queryset(self, request):
"""
Make special filtering by user's permissions.
"""
if not request.user.has_perm('zinnia.can_view_all'):
queryset = self.model.objects.filter(authors__pk=request.user.pk)
else:
queryset = super(EntryAdmin, self).get_queryset(request)
return queryset.prefetch_related('categories', 'authors', 'sites') | def function[get_queryset, parameter[self, request]]:
constant[
Make special filtering by user's permissions.
]
if <ast.UnaryOp object at 0x7da1b1d88df0> begin[:]
variable[queryset] assign[=] call[name[self].model.objects.filter, parameter[]]
return[call[name[queryset].prefetch_related, parameter[constant[categories], constant[authors], constant[sites]]]] | keyword[def] identifier[get_queryset] ( identifier[self] , identifier[request] ):
literal[string]
keyword[if] keyword[not] identifier[request] . identifier[user] . identifier[has_perm] ( literal[string] ):
identifier[queryset] = identifier[self] . identifier[model] . identifier[objects] . identifier[filter] ( identifier[authors__pk] = identifier[request] . identifier[user] . identifier[pk] )
keyword[else] :
identifier[queryset] = identifier[super] ( identifier[EntryAdmin] , identifier[self] ). identifier[get_queryset] ( identifier[request] )
keyword[return] identifier[queryset] . identifier[prefetch_related] ( literal[string] , literal[string] , literal[string] ) | def get_queryset(self, request):
"""
Make special filtering by user's permissions.
"""
if not request.user.has_perm('zinnia.can_view_all'):
queryset = self.model.objects.filter(authors__pk=request.user.pk) # depends on [control=['if'], data=[]]
else:
queryset = super(EntryAdmin, self).get_queryset(request)
return queryset.prefetch_related('categories', 'authors', 'sites') |
def return_socket(self, sock_info):
    """Return the socket to the pool, or if it's closed discard it."""
    if self.pid != os.getpid():
        # The pool was inherited across a fork: discard everything.
        self.reset()
    elif sock_info.pool_id != self.pool_id:
        # Socket belongs to a previous generation of this pool.
        sock_info.close()
    elif not sock_info.closed:
        # Healthy socket from the current generation: check it back in.
        sock_info.last_checkin = _time()
        with self.lock:
            self.sockets.add(sock_info)
    # Whatever happened above, the checkout slot is now free again.
    self._socket_semaphore.release()
    with self.lock:
        self.active_sockets -= 1
constant[Return the socket to the pool, or if it's closed discard it.]
if compare[name[self].pid not_equal[!=] call[name[os].getpid, parameter[]]] begin[:]
call[name[self].reset, parameter[]]
call[name[self]._socket_semaphore.release, parameter[]]
with name[self].lock begin[:]
<ast.AugAssign object at 0x7da18dc9ae30> | keyword[def] identifier[return_socket] ( identifier[self] , identifier[sock_info] ):
literal[string]
keyword[if] identifier[self] . identifier[pid] != identifier[os] . identifier[getpid] ():
identifier[self] . identifier[reset] ()
keyword[else] :
keyword[if] identifier[sock_info] . identifier[pool_id] != identifier[self] . identifier[pool_id] :
identifier[sock_info] . identifier[close] ()
keyword[elif] keyword[not] identifier[sock_info] . identifier[closed] :
identifier[sock_info] . identifier[last_checkin] = identifier[_time] ()
keyword[with] identifier[self] . identifier[lock] :
identifier[self] . identifier[sockets] . identifier[add] ( identifier[sock_info] )
identifier[self] . identifier[_socket_semaphore] . identifier[release] ()
keyword[with] identifier[self] . identifier[lock] :
identifier[self] . identifier[active_sockets] -= literal[int] | def return_socket(self, sock_info):
"""Return the socket to the pool, or if it's closed discard it."""
if self.pid != os.getpid():
self.reset() # depends on [control=['if'], data=[]]
elif sock_info.pool_id != self.pool_id:
sock_info.close() # depends on [control=['if'], data=[]]
elif not sock_info.closed:
sock_info.last_checkin = _time()
with self.lock:
self.sockets.add(sock_info) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
self._socket_semaphore.release()
with self.lock:
self.active_sockets -= 1 # depends on [control=['with'], data=[]] |
def show_xyzs(self, xs, ys, zs, imgsize:int=4, figsize:Optional[Tuple[int,int]]=None, **kwargs):
    "Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`."
    if self._square_show_res:
        # Square layout: one axis per sample, target/prediction in the title.
        title = 'Ground truth\nPredictions'
        rows = int(np.ceil(math.sqrt(len(xs))))
        axs = subplots(rows, rows, imgsize=imgsize, figsize=figsize, title=title, weight='bold', size=12)
        flat_axes = axs.flatten()
        for inp, target, pred, axis in zip(xs, ys, zs, flat_axes):
            inp.show(ax=axis, title=f'{str(target)}\n{str(pred)}', **kwargs)
        # Blank out any leftover axes in the square grid.
        for axis in flat_axes[len(xs):]:
            axis.axis('off')
    else:
        # Two-column layout: ground truth on the left, prediction on the right.
        title = 'Ground truth/Predictions'
        axs = subplots(len(xs), 2, imgsize=imgsize, figsize=figsize, title=title, weight='bold', size=14)
        for row, (inp, target, pred) in enumerate(zip(xs, ys, zs)):
            inp.show(ax=axs[row,0], y=target, **kwargs)
            inp.show(ax=axs[row,1], y=pred, **kwargs)
constant[Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`.]
if name[self]._square_show_res begin[:]
variable[title] assign[=] constant[Ground truth
Predictions]
variable[rows] assign[=] call[name[int], parameter[call[name[np].ceil, parameter[call[name[math].sqrt, parameter[call[name[len], parameter[name[xs]]]]]]]]]
variable[axs] assign[=] call[name[subplots], parameter[name[rows], name[rows]]]
for taget[tuple[[<ast.Name object at 0x7da1b1e76170>, <ast.Name object at 0x7da1b1e744c0>, <ast.Name object at 0x7da1b1e75420>, <ast.Name object at 0x7da1b1e764a0>]]] in starred[call[name[zip], parameter[name[xs], name[ys], name[zs], call[name[axs].flatten, parameter[]]]]] begin[:]
call[name[x].show, parameter[]]
for taget[name[ax]] in starred[call[call[name[axs].flatten, parameter[]]][<ast.Slice object at 0x7da1b1e762f0>]] begin[:]
call[name[ax].axis, parameter[constant[off]]] | keyword[def] identifier[show_xyzs] ( identifier[self] , identifier[xs] , identifier[ys] , identifier[zs] , identifier[imgsize] : identifier[int] = literal[int] , identifier[figsize] : identifier[Optional] [ identifier[Tuple] [ identifier[int] , identifier[int] ]]= keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[_square_show_res] :
identifier[title] = literal[string]
identifier[rows] = identifier[int] ( identifier[np] . identifier[ceil] ( identifier[math] . identifier[sqrt] ( identifier[len] ( identifier[xs] ))))
identifier[axs] = identifier[subplots] ( identifier[rows] , identifier[rows] , identifier[imgsize] = identifier[imgsize] , identifier[figsize] = identifier[figsize] , identifier[title] = identifier[title] , identifier[weight] = literal[string] , identifier[size] = literal[int] )
keyword[for] identifier[x] , identifier[y] , identifier[z] , identifier[ax] keyword[in] identifier[zip] ( identifier[xs] , identifier[ys] , identifier[zs] , identifier[axs] . identifier[flatten] ()): identifier[x] . identifier[show] ( identifier[ax] = identifier[ax] , identifier[title] = literal[string] ,** identifier[kwargs] )
keyword[for] identifier[ax] keyword[in] identifier[axs] . identifier[flatten] ()[ identifier[len] ( identifier[xs] ):]: identifier[ax] . identifier[axis] ( literal[string] )
keyword[else] :
identifier[title] = literal[string]
identifier[axs] = identifier[subplots] ( identifier[len] ( identifier[xs] ), literal[int] , identifier[imgsize] = identifier[imgsize] , identifier[figsize] = identifier[figsize] , identifier[title] = identifier[title] , identifier[weight] = literal[string] , identifier[size] = literal[int] )
keyword[for] identifier[i] ,( identifier[x] , identifier[y] , identifier[z] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[xs] , identifier[ys] , identifier[zs] )):
identifier[x] . identifier[show] ( identifier[ax] = identifier[axs] [ identifier[i] , literal[int] ], identifier[y] = identifier[y] ,** identifier[kwargs] )
identifier[x] . identifier[show] ( identifier[ax] = identifier[axs] [ identifier[i] , literal[int] ], identifier[y] = identifier[z] ,** identifier[kwargs] ) | def show_xyzs(self, xs, ys, zs, imgsize: int=4, figsize: Optional[Tuple[int, int]]=None, **kwargs):
"""Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`."""
if self._square_show_res:
title = 'Ground truth\nPredictions'
rows = int(np.ceil(math.sqrt(len(xs))))
axs = subplots(rows, rows, imgsize=imgsize, figsize=figsize, title=title, weight='bold', size=12)
for (x, y, z, ax) in zip(xs, ys, zs, axs.flatten()):
x.show(ax=ax, title=f'{str(y)}\n{str(z)}', **kwargs) # depends on [control=['for'], data=[]]
for ax in axs.flatten()[len(xs):]:
ax.axis('off') # depends on [control=['for'], data=['ax']] # depends on [control=['if'], data=[]]
else:
title = 'Ground truth/Predictions'
axs = subplots(len(xs), 2, imgsize=imgsize, figsize=figsize, title=title, weight='bold', size=14)
for (i, (x, y, z)) in enumerate(zip(xs, ys, zs)):
x.show(ax=axs[i, 0], y=y, **kwargs)
x.show(ax=axs[i, 1], y=z, **kwargs) # depends on [control=['for'], data=[]] |
def _read_config_file(path=None):
    """
    Reads config file.
    First look for config file in the current directory, then in the
    user's home directory, then in the same directory as this file.
    Tries to find config file both with and without preceeding 'dot'
    for hidden files (prefer non-hidden).
    """
    cfg = configparser.ConfigParser()
    if path is None:  # pragma: no cover
        search_dirs = (os.curdir, os.path.expanduser('~'),
                       os.path.dirname(os.path.realpath(__file__)))
        for directory in (os.path.abspath(d) for d in search_dirs):
            # Prefer the non-hidden file name over the dotted one.
            if (cfg.read(os.path.join(directory, 'gpflowrc'))
                    or cfg.read(os.path.join(directory, '.gpflowrc'))):
                break
    elif not cfg.read(path):
        raise RuntimeError("Config at '{0}' cannot be read".format(path))
    return cfg
constant[
Reads config file.
First look for config file in the current directory, then in the
user's home directory, then in the same directory as this file.
Tries to find config file both with and without preceeding 'dot'
for hidden files (prefer non-hidden).
]
variable[cfg] assign[=] call[name[configparser].ConfigParser, parameter[]]
if compare[name[path] is constant[None]] begin[:]
variable[dirs] assign[=] list[[<ast.Attribute object at 0x7da1b1f48c10>, <ast.Call object at 0x7da1b1f49300>, <ast.Call object at 0x7da1b1f48d30>]]
variable[locations] assign[=] call[name[map], parameter[name[os].path.abspath, name[dirs]]]
for taget[name[loc]] in starred[name[locations]] begin[:]
if call[name[cfg].read, parameter[call[name[os].path.join, parameter[name[loc], constant[gpflowrc]]]]] begin[:]
break
if call[name[cfg].read, parameter[call[name[os].path.join, parameter[name[loc], constant[.gpflowrc]]]]] begin[:]
break
return[name[cfg]] | keyword[def] identifier[_read_config_file] ( identifier[path] = keyword[None] ):
literal[string]
identifier[cfg] = identifier[configparser] . identifier[ConfigParser] ()
keyword[if] identifier[path] keyword[is] keyword[None] :
identifier[dirs] =[ identifier[os] . identifier[curdir] , identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] ),
identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[realpath] ( identifier[__file__] ))]
identifier[locations] = identifier[map] ( identifier[os] . identifier[path] . identifier[abspath] , identifier[dirs] )
keyword[for] identifier[loc] keyword[in] identifier[locations] :
keyword[if] identifier[cfg] . identifier[read] ( identifier[os] . identifier[path] . identifier[join] ( identifier[loc] , literal[string] )):
keyword[break]
keyword[if] identifier[cfg] . identifier[read] ( identifier[os] . identifier[path] . identifier[join] ( identifier[loc] , literal[string] )):
keyword[break]
keyword[else] :
keyword[if] keyword[not] identifier[cfg] . identifier[read] ( identifier[path] ):
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[path] ))
keyword[return] identifier[cfg] | def _read_config_file(path=None):
"""
Reads config file.
First look for config file in the current directory, then in the
user's home directory, then in the same directory as this file.
Tries to find config file both with and without preceeding 'dot'
for hidden files (prefer non-hidden).
"""
cfg = configparser.ConfigParser()
if path is None: # pragma: no cover
dirs = [os.curdir, os.path.expanduser('~'), os.path.dirname(os.path.realpath(__file__))]
locations = map(os.path.abspath, dirs)
for loc in locations:
if cfg.read(os.path.join(loc, 'gpflowrc')):
break # depends on [control=['if'], data=[]]
if cfg.read(os.path.join(loc, '.gpflowrc')):
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['loc']] # depends on [control=['if'], data=[]]
elif not cfg.read(path):
raise RuntimeError("Config at '{0}' cannot be read".format(path)) # depends on [control=['if'], data=[]]
return cfg |
def _update_color_hsv(self, event=None):
    """Update display after a change in the HSV spinboxes."""
    if event is not None and event.widget.old_value == event.widget.get():
        # Spinbox value did not actually change: nothing to refresh.
        return
    hue, sat, val = self.hue.get(), self.saturation.get(), self.value.get()
    color = hsv_to_rgb(hue, sat, val)
    # Push the RGB components into their respective spinboxes.
    for channel, component in zip((self.red, self.green, self.blue), color):
        channel.set(component)
    if self.alpha_channel:
        color += (self.alpha.get(),)
        self.alphabar.set_color(color)
    hexa = rgb_to_hexa(*color)
    self.hexa.delete(0, "end")
    self.hexa.insert(0, hexa)
    self.square.set_hsv((hue, sat, val))
    self.bar.set(hue)
    self._update_preview()
constant[Update display after a change in the HSV spinboxes.]
if <ast.BoolOp object at 0x7da1b2383b20> begin[:]
variable[h] assign[=] call[name[self].hue.get, parameter[]]
variable[s] assign[=] call[name[self].saturation.get, parameter[]]
variable[v] assign[=] call[name[self].value.get, parameter[]]
variable[sel_color] assign[=] call[name[hsv_to_rgb], parameter[name[h], name[s], name[v]]]
call[name[self].red.set, parameter[call[name[sel_color]][constant[0]]]]
call[name[self].green.set, parameter[call[name[sel_color]][constant[1]]]]
call[name[self].blue.set, parameter[call[name[sel_color]][constant[2]]]]
if name[self].alpha_channel begin[:]
<ast.AugAssign object at 0x7da1b2381600>
call[name[self].alphabar.set_color, parameter[name[sel_color]]]
variable[hexa] assign[=] call[name[rgb_to_hexa], parameter[<ast.Starred object at 0x7da1b23824d0>]]
call[name[self].hexa.delete, parameter[constant[0], constant[end]]]
call[name[self].hexa.insert, parameter[constant[0], name[hexa]]]
call[name[self].square.set_hsv, parameter[tuple[[<ast.Name object at 0x7da1b2381ab0>, <ast.Name object at 0x7da1b23823e0>, <ast.Name object at 0x7da1b2380d30>]]]]
call[name[self].bar.set, parameter[name[h]]]
call[name[self]._update_preview, parameter[]] | keyword[def] identifier[_update_color_hsv] ( identifier[self] , identifier[event] = keyword[None] ):
literal[string]
keyword[if] identifier[event] keyword[is] keyword[None] keyword[or] identifier[event] . identifier[widget] . identifier[old_value] != identifier[event] . identifier[widget] . identifier[get] ():
identifier[h] = identifier[self] . identifier[hue] . identifier[get] ()
identifier[s] = identifier[self] . identifier[saturation] . identifier[get] ()
identifier[v] = identifier[self] . identifier[value] . identifier[get] ()
identifier[sel_color] = identifier[hsv_to_rgb] ( identifier[h] , identifier[s] , identifier[v] )
identifier[self] . identifier[red] . identifier[set] ( identifier[sel_color] [ literal[int] ])
identifier[self] . identifier[green] . identifier[set] ( identifier[sel_color] [ literal[int] ])
identifier[self] . identifier[blue] . identifier[set] ( identifier[sel_color] [ literal[int] ])
keyword[if] identifier[self] . identifier[alpha_channel] :
identifier[sel_color] +=( identifier[self] . identifier[alpha] . identifier[get] (),)
identifier[self] . identifier[alphabar] . identifier[set_color] ( identifier[sel_color] )
identifier[hexa] = identifier[rgb_to_hexa] (* identifier[sel_color] )
identifier[self] . identifier[hexa] . identifier[delete] ( literal[int] , literal[string] )
identifier[self] . identifier[hexa] . identifier[insert] ( literal[int] , identifier[hexa] )
identifier[self] . identifier[square] . identifier[set_hsv] (( identifier[h] , identifier[s] , identifier[v] ))
identifier[self] . identifier[bar] . identifier[set] ( identifier[h] )
identifier[self] . identifier[_update_preview] () | def _update_color_hsv(self, event=None):
"""Update display after a change in the HSV spinboxes."""
if event is None or event.widget.old_value != event.widget.get():
h = self.hue.get()
s = self.saturation.get()
v = self.value.get()
sel_color = hsv_to_rgb(h, s, v)
self.red.set(sel_color[0])
self.green.set(sel_color[1])
self.blue.set(sel_color[2])
if self.alpha_channel:
sel_color += (self.alpha.get(),)
self.alphabar.set_color(sel_color) # depends on [control=['if'], data=[]]
hexa = rgb_to_hexa(*sel_color)
self.hexa.delete(0, 'end')
self.hexa.insert(0, hexa)
self.square.set_hsv((h, s, v))
self.bar.set(h)
self._update_preview() # depends on [control=['if'], data=[]] |
def set_default_account_by_address(self, b58_address: str):
    """
    This interface is used to set default account by given base58 encode address.

    :param b58_address: a base58 encode address.
    :raise SDKException: if no account in the wallet matches the address.
    """
    # Locate the matching account; None signals "not found" and replaces
    # the original flag/index bookkeeping loop.
    index = next((i for i, acct in enumerate(self.accounts)
                  if acct.b58_address == b58_address), None)
    if index is None:
        raise SDKException(ErrorCode.get_account_by_address_err)
    # Exactly one account may be the default: clear all, then set the match.
    for acct in self.accounts:
        acct.is_default = False
    self.accounts[index].is_default = True
    self.default_account_address = b58_address
constant[
This interface is used to set default account by given base58 encode address.
:param b58_address: a base58 encode address.
]
variable[flag] assign[=] constant[True]
variable[index] assign[=] <ast.UnaryOp object at 0x7da20e955240>
for taget[name[acct]] in starred[name[self].accounts] begin[:]
<ast.AugAssign object at 0x7da20e9574f0>
if compare[name[acct].b58_address equal[==] name[b58_address]] begin[:]
variable[flag] assign[=] constant[False]
break
if name[flag] begin[:]
<ast.Raise object at 0x7da20e956020>
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].accounts]]]]] begin[:]
call[name[self].accounts][name[i]].is_default assign[=] constant[False]
call[name[self].accounts][name[index]].is_default assign[=] constant[True]
name[self].default_account_address assign[=] name[b58_address] | keyword[def] identifier[set_default_account_by_address] ( identifier[self] , identifier[b58_address] : identifier[str] ):
literal[string]
identifier[flag] = keyword[True]
identifier[index] =- literal[int]
keyword[for] identifier[acct] keyword[in] identifier[self] . identifier[accounts] :
identifier[index] += literal[int]
keyword[if] identifier[acct] . identifier[b58_address] == identifier[b58_address] :
identifier[flag] = keyword[False]
keyword[break]
keyword[if] identifier[flag] :
keyword[raise] identifier[SDKException] ( identifier[ErrorCode] . identifier[get_account_by_address_err] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[accounts] )):
identifier[self] . identifier[accounts] [ identifier[i] ]. identifier[is_default] = keyword[False]
identifier[self] . identifier[accounts] [ identifier[index] ]. identifier[is_default] = keyword[True]
identifier[self] . identifier[default_account_address] = identifier[b58_address] | def set_default_account_by_address(self, b58_address: str):
"""
This interface is used to set default account by given base58 encode address.
:param b58_address: a base58 encode address.
"""
flag = True
index = -1
for acct in self.accounts:
index += 1
if acct.b58_address == b58_address:
flag = False
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['acct']]
if flag:
raise SDKException(ErrorCode.get_account_by_address_err) # depends on [control=['if'], data=[]]
for i in range(len(self.accounts)):
self.accounts[i].is_default = False # depends on [control=['for'], data=['i']]
self.accounts[index].is_default = True
self.default_account_address = b58_address |
def read_var_header(fd, endian):
    """Read the full tag of the next data element.

    Parameters:
        fd: file-like object positioned at the start of an element tag.
        endian: byte-order specifier forwarded to the module-level
            ``unpack`` helper.

    Returns a tuple ``(header, next_pos, fd)`` where ``header`` is the
    dict produced by ``read_header``, ``next_pos`` is the absolute file
    position of the tag that follows this element in the original stream,
    and ``fd`` is a file-like object for reading the element data -- a
    BytesIO over the inflated bytes when the element was zlib-compressed,
    otherwise the original ``fd``.
    """
    # A tag is two 32-bit unsigned ints ('II'): element type id, byte count.
    mtpn, num_bytes = unpack(endian, 'II', fd.read(8))
    # Position of the next tag in the outer stream -- computed before fd
    # may be swapped for a decompression buffer below.
    next_pos = fd.tell() + num_bytes
    if mtpn == etypes['miCOMPRESSED']['n']:
        # read compressed data
        data = fd.read(num_bytes)
        dcor = zlib.decompressobj()
        # from here, read of the decompressed data
        fd_var = BytesIO(dcor.decompress(data))
        del data
        fd = fd_var
        # Check the stream is not so broken as to leave cruft behind
        if dcor.flush() != b'':
            raise ParseError('Error in compressed data.')
        # read full tag from the uncompressed data
        mtpn, num_bytes = unpack(endian, 'II', fd.read(8))
    # Only miMATRIX elements are valid at this level of the file.
    if mtpn != etypes['miMATRIX']['n']:
        raise ParseError('Expecting miMATRIX type number {}, '
                         'got {}'.format(etypes['miMATRIX']['n'], mtpn))
    # read the header
    header = read_header(fd, endian)
    return header, next_pos, fd
constant[Read full header tag.
Return a dict with the parsed header, the file position of next tag,
a file like object for reading the uncompressed element data.
]
<ast.Tuple object at 0x7da1aff6e470> assign[=] call[name[unpack], parameter[name[endian], constant[II], call[name[fd].read, parameter[constant[8]]]]]
variable[next_pos] assign[=] binary_operation[call[name[fd].tell, parameter[]] + name[num_bytes]]
if compare[name[mtpn] equal[==] call[call[name[etypes]][constant[miCOMPRESSED]]][constant[n]]] begin[:]
variable[data] assign[=] call[name[fd].read, parameter[name[num_bytes]]]
variable[dcor] assign[=] call[name[zlib].decompressobj, parameter[]]
variable[fd_var] assign[=] call[name[BytesIO], parameter[call[name[dcor].decompress, parameter[name[data]]]]]
<ast.Delete object at 0x7da1affe46a0>
variable[fd] assign[=] name[fd_var]
if compare[call[name[dcor].flush, parameter[]] not_equal[!=] constant[b'']] begin[:]
<ast.Raise object at 0x7da1affe43a0>
<ast.Tuple object at 0x7da1affe4040> assign[=] call[name[unpack], parameter[name[endian], constant[II], call[name[fd].read, parameter[constant[8]]]]]
if compare[name[mtpn] not_equal[!=] call[call[name[etypes]][constant[miMATRIX]]][constant[n]]] begin[:]
<ast.Raise object at 0x7da1affe45e0>
variable[header] assign[=] call[name[read_header], parameter[name[fd], name[endian]]]
return[tuple[[<ast.Name object at 0x7da1affe41c0>, <ast.Name object at 0x7da1affe5930>, <ast.Name object at 0x7da1affe60b0>]]] | keyword[def] identifier[read_var_header] ( identifier[fd] , identifier[endian] ):
literal[string]
identifier[mtpn] , identifier[num_bytes] = identifier[unpack] ( identifier[endian] , literal[string] , identifier[fd] . identifier[read] ( literal[int] ))
identifier[next_pos] = identifier[fd] . identifier[tell] ()+ identifier[num_bytes]
keyword[if] identifier[mtpn] == identifier[etypes] [ literal[string] ][ literal[string] ]:
identifier[data] = identifier[fd] . identifier[read] ( identifier[num_bytes] )
identifier[dcor] = identifier[zlib] . identifier[decompressobj] ()
identifier[fd_var] = identifier[BytesIO] ( identifier[dcor] . identifier[decompress] ( identifier[data] ))
keyword[del] identifier[data]
identifier[fd] = identifier[fd_var]
keyword[if] identifier[dcor] . identifier[flush] ()!= literal[string] :
keyword[raise] identifier[ParseError] ( literal[string] )
identifier[mtpn] , identifier[num_bytes] = identifier[unpack] ( identifier[endian] , literal[string] , identifier[fd] . identifier[read] ( literal[int] ))
keyword[if] identifier[mtpn] != identifier[etypes] [ literal[string] ][ literal[string] ]:
keyword[raise] identifier[ParseError] ( literal[string]
literal[string] . identifier[format] ( identifier[etypes] [ literal[string] ][ literal[string] ], identifier[mtpn] ))
identifier[header] = identifier[read_header] ( identifier[fd] , identifier[endian] )
keyword[return] identifier[header] , identifier[next_pos] , identifier[fd] | def read_var_header(fd, endian):
"""Read full header tag.
Return a dict with the parsed header, the file position of next tag,
a file like object for reading the uncompressed element data.
"""
(mtpn, num_bytes) = unpack(endian, 'II', fd.read(8))
next_pos = fd.tell() + num_bytes
if mtpn == etypes['miCOMPRESSED']['n']:
# read compressed data
data = fd.read(num_bytes)
dcor = zlib.decompressobj()
# from here, read of the decompressed data
fd_var = BytesIO(dcor.decompress(data))
del data
fd = fd_var
# Check the stream is not so broken as to leave cruft behind
if dcor.flush() != b'':
raise ParseError('Error in compressed data.') # depends on [control=['if'], data=[]]
# read full tag from the uncompressed data
(mtpn, num_bytes) = unpack(endian, 'II', fd.read(8)) # depends on [control=['if'], data=['mtpn']]
if mtpn != etypes['miMATRIX']['n']:
raise ParseError('Expecting miMATRIX type number {}, got {}'.format(etypes['miMATRIX']['n'], mtpn)) # depends on [control=['if'], data=['mtpn']]
# read the header
header = read_header(fd, endian)
return (header, next_pos, fd) |
def ping(dest_ip=None, **kwargs):
    '''
    Send a ping RPC to a device
    dest_ip
        The IP of the device to ping
    dev_timeout : 30
        The NETCONF RPC timeout (in seconds)
    rapid : False
        When ``True``, executes ping at 100pps instead of 1pps
    ttl
        Maximum number of IP routers (IP hops) allowed between source and
        destination
    routing_instance
        Name of the routing instance to use to send the ping
    interface
        Interface used to send traffic
    count : 5
        Number of packets to send
    CLI Examples:
    .. code-block:: bash
        salt 'device_name' junos.ping '8.8.8.8' count=5
        salt 'device_name' junos.ping '8.8.8.8' ttl=1 rapid=True
    '''
    conn = __proxy__['junos.conn']()
    ret = {}
    # A destination is mandatory; bail out early without touching the device.
    if dest_ip is None:
        ret['message'] = 'Please specify the destination ip to ping.'
        ret['out'] = False
        return ret
    op = {'host': dest_ip}
    # When invoked through the salt CLI, user options arrive as the last
    # element of __pub_arg; otherwise merge the raw keyword arguments.
    if '__pub_arg' in kwargs:
        pub_arg = kwargs['__pub_arg']
        if pub_arg and isinstance(pub_arg[-1], dict):
            op.update(pub_arg[-1])
    else:
        op.update(kwargs)
    # The RPC expects string values for these numeric options.
    op['count'] = six.text_type(op.pop('count', 5))
    if 'ttl' in op:
        op['ttl'] = six.text_type(op['ttl'])
    ret['out'] = True
    try:
        ret['message'] = jxmlease.parse(etree.tostring(conn.rpc.ping(**op)))
    except Exception as exception:
        ret['message'] = 'Execution failed due to "{0}"'.format(exception)
        ret['out'] = False
    return ret
constant[
Send a ping RPC to a device
dest_ip
The IP of the device to ping
dev_timeout : 30
The NETCONF RPC timeout (in seconds)
rapid : False
When ``True``, executes ping at 100pps instead of 1pps
ttl
Maximum number of IP routers (IP hops) allowed between source and
destination
routing_instance
Name of the routing instance to use to send the ping
interface
Interface used to send traffic
count : 5
Number of packets to send
CLI Examples:
.. code-block:: bash
salt 'device_name' junos.ping '8.8.8.8' count=5
salt 'device_name' junos.ping '8.8.8.8' ttl=1 rapid=True
]
variable[conn] assign[=] call[call[name[__proxy__]][constant[junos.conn]], parameter[]]
variable[ret] assign[=] dictionary[[], []]
if compare[name[dest_ip] is constant[None]] begin[:]
call[name[ret]][constant[message]] assign[=] constant[Please specify the destination ip to ping.]
call[name[ret]][constant[out]] assign[=] constant[False]
return[name[ret]]
variable[op] assign[=] dictionary[[<ast.Constant object at 0x7da18f720c70>], [<ast.Name object at 0x7da18f722920>]]
if compare[constant[__pub_arg] in name[kwargs]] begin[:]
if call[name[kwargs]][constant[__pub_arg]] begin[:]
if call[name[isinstance], parameter[call[call[name[kwargs]][constant[__pub_arg]]][<ast.UnaryOp object at 0x7da18f7213c0>], name[dict]]] begin[:]
call[name[op].update, parameter[call[call[name[kwargs]][constant[__pub_arg]]][<ast.UnaryOp object at 0x7da18f58d900>]]]
call[name[op]][constant[count]] assign[=] call[name[six].text_type, parameter[call[name[op].pop, parameter[constant[count], constant[5]]]]]
if compare[constant[ttl] in name[op]] begin[:]
call[name[op]][constant[ttl]] assign[=] call[name[six].text_type, parameter[call[name[op]][constant[ttl]]]]
call[name[ret]][constant[out]] assign[=] constant[True]
<ast.Try object at 0x7da18f58ce20>
return[name[ret]] | keyword[def] identifier[ping] ( identifier[dest_ip] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[conn] = identifier[__proxy__] [ literal[string] ]()
identifier[ret] ={}
keyword[if] identifier[dest_ip] keyword[is] keyword[None] :
identifier[ret] [ literal[string] ]= literal[string]
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[ret]
identifier[op] ={ literal[string] : identifier[dest_ip] }
keyword[if] literal[string] keyword[in] identifier[kwargs] :
keyword[if] identifier[kwargs] [ literal[string] ]:
keyword[if] identifier[isinstance] ( identifier[kwargs] [ literal[string] ][- literal[int] ], identifier[dict] ):
identifier[op] . identifier[update] ( identifier[kwargs] [ literal[string] ][- literal[int] ])
keyword[else] :
identifier[op] . identifier[update] ( identifier[kwargs] )
identifier[op] [ literal[string] ]= identifier[six] . identifier[text_type] ( identifier[op] . identifier[pop] ( literal[string] , literal[int] ))
keyword[if] literal[string] keyword[in] identifier[op] :
identifier[op] [ literal[string] ]= identifier[six] . identifier[text_type] ( identifier[op] [ literal[string] ])
identifier[ret] [ literal[string] ]= keyword[True]
keyword[try] :
identifier[ret] [ literal[string] ]= identifier[jxmlease] . identifier[parse] ( identifier[etree] . identifier[tostring] ( identifier[conn] . identifier[rpc] . identifier[ping] (** identifier[op] )))
keyword[except] identifier[Exception] keyword[as] identifier[exception] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[exception] )
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[ret] | def ping(dest_ip=None, **kwargs):
"""
Send a ping RPC to a device
dest_ip
The IP of the device to ping
dev_timeout : 30
The NETCONF RPC timeout (in seconds)
rapid : False
When ``True``, executes ping at 100pps instead of 1pps
ttl
Maximum number of IP routers (IP hops) allowed between source and
destination
routing_instance
Name of the routing instance to use to send the ping
interface
Interface used to send traffic
count : 5
Number of packets to send
CLI Examples:
.. code-block:: bash
salt 'device_name' junos.ping '8.8.8.8' count=5
salt 'device_name' junos.ping '8.8.8.8' ttl=1 rapid=True
"""
conn = __proxy__['junos.conn']()
ret = {}
if dest_ip is None:
ret['message'] = 'Please specify the destination ip to ping.'
ret['out'] = False
return ret # depends on [control=['if'], data=[]]
op = {'host': dest_ip}
if '__pub_arg' in kwargs:
if kwargs['__pub_arg']:
if isinstance(kwargs['__pub_arg'][-1], dict):
op.update(kwargs['__pub_arg'][-1]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['kwargs']]
else:
op.update(kwargs)
op['count'] = six.text_type(op.pop('count', 5))
if 'ttl' in op:
op['ttl'] = six.text_type(op['ttl']) # depends on [control=['if'], data=['op']]
ret['out'] = True
try:
ret['message'] = jxmlease.parse(etree.tostring(conn.rpc.ping(**op))) # depends on [control=['try'], data=[]]
except Exception as exception:
ret['message'] = 'Execution failed due to "{0}"'.format(exception)
ret['out'] = False # depends on [control=['except'], data=['exception']]
return ret |
def have_cycle(graph:dict) -> frozenset:
    """Perform a topologic sort to detect any cycle.
    Return the set of unsortable nodes. If at least one item,
    then there is cycle in given graph.
    """
    # All nodes appearing anywhere in the graph, as source or as target.
    all_nodes = frozenset(it.chain(it.chain.from_iterable(graph.values()), graph.keys()))
    predecessors = reversed_graph(graph)  # successor -> set of predecessors
    sorted_nodes = set()  # nodes already emitted by the topological sort
    changed = True
    # Fixpoint: repeatedly emit every node whose predecessors are all emitted.
    while changed:
        changed = False
        for node in all_nodes - sorted_nodes:
            if not (predecessors.get(node, set()) - sorted_nodes):
                sorted_nodes.add(node)
                changed = True
    # Whatever could not be emitted participates in (or depends on) a cycle.
    return frozenset(all_nodes - sorted_nodes)
constant[Perform a topologic sort to detect any cycle.
Return the set of unsortable nodes. If at least one item,
then there is cycle in given graph.
]
variable[walked] assign[=] call[name[set], parameter[]]
variable[nodes] assign[=] call[name[frozenset], parameter[call[name[it].chain, parameter[call[name[it].chain.from_iterable, parameter[call[name[graph].values, parameter[]]]], call[name[graph].keys, parameter[]]]]]]
variable[preds] assign[=] call[name[reversed_graph], parameter[name[graph]]]
variable[last_walked_len] assign[=] <ast.UnaryOp object at 0x7da20c6e6560>
while compare[name[last_walked_len] not_equal[!=] call[name[len], parameter[name[walked]]]] begin[:]
variable[last_walked_len] assign[=] call[name[len], parameter[name[walked]]]
for taget[name[node]] in starred[binary_operation[name[nodes] - name[walked]]] begin[:]
if compare[call[name[len], parameter[binary_operation[call[name[preds].get, parameter[name[node], call[name[set], parameter[]]]] - name[walked]]]] equal[==] constant[0]] begin[:]
call[name[walked].add, parameter[name[node]]]
return[call[name[frozenset], parameter[binary_operation[name[nodes] - name[walked]]]]] | keyword[def] identifier[have_cycle] ( identifier[graph] : identifier[dict] )-> identifier[frozenset] :
literal[string]
identifier[walked] = identifier[set] ()
identifier[nodes] = identifier[frozenset] ( identifier[it] . identifier[chain] ( identifier[it] . identifier[chain] . identifier[from_iterable] ( identifier[graph] . identifier[values] ()), identifier[graph] . identifier[keys] ()))
identifier[preds] = identifier[reversed_graph] ( identifier[graph] )
identifier[last_walked_len] =- literal[int]
keyword[while] identifier[last_walked_len] != identifier[len] ( identifier[walked] ):
identifier[last_walked_len] = identifier[len] ( identifier[walked] )
keyword[for] identifier[node] keyword[in] identifier[nodes] - identifier[walked] :
keyword[if] identifier[len] ( identifier[preds] . identifier[get] ( identifier[node] , identifier[set] ())- identifier[walked] )== literal[int] :
identifier[walked] . identifier[add] ( identifier[node] )
keyword[return] identifier[frozenset] ( identifier[nodes] - identifier[walked] ) | def have_cycle(graph: dict) -> frozenset:
"""Perform a topologic sort to detect any cycle.
Return the set of unsortable nodes. If at least one item,
then there is cycle in given graph.
"""
# topological sort
walked = set() # walked nodes
nodes = frozenset(it.chain(it.chain.from_iterable(graph.values()), graph.keys())) # all nodes of the graph
preds = reversed_graph(graph) # succ: preds
last_walked_len = -1
while last_walked_len != len(walked):
last_walked_len = len(walked)
for node in nodes - walked:
if len(preds.get(node, set()) - walked) == 0:
walked.add(node) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']] # depends on [control=['while'], data=['last_walked_len']]
return frozenset(nodes - walked) |
async def get_authenticated_user(
self, redirect_uri: str, code: str
) -> Dict[str, Any]:
"""Handles the login for the Google user, returning an access token.
The result is a dictionary containing an ``access_token`` field
([among others](https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse)).
Unlike other ``get_authenticated_user`` methods in this package,
this method does not return any additional information about the user.
The returned access token can be used with `OAuth2Mixin.oauth2_request`
to request additional information (perhaps from
``https://www.googleapis.com/oauth2/v2/userinfo``)
Example usage:
.. testcode::
class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
tornado.auth.GoogleOAuth2Mixin):
async def get(self):
if self.get_argument('code', False):
access = await self.get_authenticated_user(
redirect_uri='http://your.site.com/auth/google',
code=self.get_argument('code'))
user = await self.oauth2_request(
"https://www.googleapis.com/oauth2/v1/userinfo",
access_token=access["access_token"])
# Save the user and access token with
# e.g. set_secure_cookie.
else:
await self.authorize_redirect(
redirect_uri='http://your.site.com/auth/google',
client_id=self.settings['google_oauth']['key'],
scope=['profile', 'email'],
response_type='code',
extra_params={'approval_prompt': 'auto'})
.. testoutput::
:hide:
.. versionchanged:: 6.0
The ``callback`` argument was removed. Use the returned awaitable object instead.
""" # noqa: E501
handler = cast(RequestHandler, self)
http = self.get_auth_http_client()
body = urllib.parse.urlencode(
{
"redirect_uri": redirect_uri,
"code": code,
"client_id": handler.settings[self._OAUTH_SETTINGS_KEY]["key"],
"client_secret": handler.settings[self._OAUTH_SETTINGS_KEY]["secret"],
"grant_type": "authorization_code",
}
)
response = await http.fetch(
self._OAUTH_ACCESS_TOKEN_URL,
method="POST",
headers={"Content-Type": "application/x-www-form-urlencoded"},
body=body,
)
return escape.json_decode(response.body) | <ast.AsyncFunctionDef object at 0x7da1b1f76770> | keyword[async] keyword[def] identifier[get_authenticated_user] (
identifier[self] , identifier[redirect_uri] : identifier[str] , identifier[code] : identifier[str]
)-> identifier[Dict] [ identifier[str] , identifier[Any] ]:
literal[string]
identifier[handler] = identifier[cast] ( identifier[RequestHandler] , identifier[self] )
identifier[http] = identifier[self] . identifier[get_auth_http_client] ()
identifier[body] = identifier[urllib] . identifier[parse] . identifier[urlencode] (
{
literal[string] : identifier[redirect_uri] ,
literal[string] : identifier[code] ,
literal[string] : identifier[handler] . identifier[settings] [ identifier[self] . identifier[_OAUTH_SETTINGS_KEY] ][ literal[string] ],
literal[string] : identifier[handler] . identifier[settings] [ identifier[self] . identifier[_OAUTH_SETTINGS_KEY] ][ literal[string] ],
literal[string] : literal[string] ,
}
)
identifier[response] = keyword[await] identifier[http] . identifier[fetch] (
identifier[self] . identifier[_OAUTH_ACCESS_TOKEN_URL] ,
identifier[method] = literal[string] ,
identifier[headers] ={ literal[string] : literal[string] },
identifier[body] = identifier[body] ,
)
keyword[return] identifier[escape] . identifier[json_decode] ( identifier[response] . identifier[body] ) | async def get_authenticated_user(self, redirect_uri: str, code: str) -> Dict[str, Any]:
"""Handles the login for the Google user, returning an access token.
The result is a dictionary containing an ``access_token`` field
([among others](https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse)).
Unlike other ``get_authenticated_user`` methods in this package,
this method does not return any additional information about the user.
The returned access token can be used with `OAuth2Mixin.oauth2_request`
to request additional information (perhaps from
``https://www.googleapis.com/oauth2/v2/userinfo``)
Example usage:
.. testcode::
class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
tornado.auth.GoogleOAuth2Mixin):
async def get(self):
if self.get_argument('code', False):
access = await self.get_authenticated_user(
redirect_uri='http://your.site.com/auth/google',
code=self.get_argument('code'))
user = await self.oauth2_request(
"https://www.googleapis.com/oauth2/v1/userinfo",
access_token=access["access_token"])
# Save the user and access token with
# e.g. set_secure_cookie.
else:
await self.authorize_redirect(
redirect_uri='http://your.site.com/auth/google',
client_id=self.settings['google_oauth']['key'],
scope=['profile', 'email'],
response_type='code',
extra_params={'approval_prompt': 'auto'})
.. testoutput::
:hide:
.. versionchanged:: 6.0
The ``callback`` argument was removed. Use the returned awaitable object instead.
""" # noqa: E501
handler = cast(RequestHandler, self)
http = self.get_auth_http_client()
body = urllib.parse.urlencode({'redirect_uri': redirect_uri, 'code': code, 'client_id': handler.settings[self._OAUTH_SETTINGS_KEY]['key'], 'client_secret': handler.settings[self._OAUTH_SETTINGS_KEY]['secret'], 'grant_type': 'authorization_code'})
response = await http.fetch(self._OAUTH_ACCESS_TOKEN_URL, method='POST', headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=body)
return escape.json_decode(response.body) |
def allButDOI(self):
"""
Returns a string of the normalized values from the Citation excluding the DOI number. Equivalent to getting the ID with [ID()](#metaknowledge.citation.Citation.ID) then appending the extra values from [Extra()](#metaknowledge.citation.Citation.Extra) and then removing the substring containing the DOI number.
# Returns
`str`
> A string containing the data of the Citation.
"""
extraTags = ['extraAuthors', 'V', 'issue', 'P', 'misc']
s = self.ID()
extras = []
for tag in extraTags:
if getattr(self, tag, False):
extras.append(str(getattr(self, tag)))
if len(extras) > 0:
return "{0}, {1}".format(s, ', '.join(extras))
else:
return s | def function[allButDOI, parameter[self]]:
constant[
Returns a string of the normalized values from the Citation excluding the DOI number. Equivalent to getting the ID with [ID()](#metaknowledge.citation.Citation.ID) then appending the extra values from [Extra()](#metaknowledge.citation.Citation.Extra) and then removing the substring containing the DOI number.
# Returns
`str`
> A string containing the data of the Citation.
]
variable[extraTags] assign[=] list[[<ast.Constant object at 0x7da1b0f29b40>, <ast.Constant object at 0x7da1b0f2b370>, <ast.Constant object at 0x7da1b0f2b550>, <ast.Constant object at 0x7da1b0f2aa10>, <ast.Constant object at 0x7da1b0f29fc0>]]
variable[s] assign[=] call[name[self].ID, parameter[]]
variable[extras] assign[=] list[[]]
for taget[name[tag]] in starred[name[extraTags]] begin[:]
if call[name[getattr], parameter[name[self], name[tag], constant[False]]] begin[:]
call[name[extras].append, parameter[call[name[str], parameter[call[name[getattr], parameter[name[self], name[tag]]]]]]]
if compare[call[name[len], parameter[name[extras]]] greater[>] constant[0]] begin[:]
return[call[constant[{0}, {1}].format, parameter[name[s], call[constant[, ].join, parameter[name[extras]]]]]] | keyword[def] identifier[allButDOI] ( identifier[self] ):
literal[string]
identifier[extraTags] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[s] = identifier[self] . identifier[ID] ()
identifier[extras] =[]
keyword[for] identifier[tag] keyword[in] identifier[extraTags] :
keyword[if] identifier[getattr] ( identifier[self] , identifier[tag] , keyword[False] ):
identifier[extras] . identifier[append] ( identifier[str] ( identifier[getattr] ( identifier[self] , identifier[tag] )))
keyword[if] identifier[len] ( identifier[extras] )> literal[int] :
keyword[return] literal[string] . identifier[format] ( identifier[s] , literal[string] . identifier[join] ( identifier[extras] ))
keyword[else] :
keyword[return] identifier[s] | def allButDOI(self):
"""
Returns a string of the normalized values from the Citation excluding the DOI number. Equivalent to getting the ID with [ID()](#metaknowledge.citation.Citation.ID) then appending the extra values from [Extra()](#metaknowledge.citation.Citation.Extra) and then removing the substring containing the DOI number.
# Returns
`str`
> A string containing the data of the Citation.
"""
extraTags = ['extraAuthors', 'V', 'issue', 'P', 'misc']
s = self.ID()
extras = []
for tag in extraTags:
if getattr(self, tag, False):
extras.append(str(getattr(self, tag))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tag']]
if len(extras) > 0:
return '{0}, {1}'.format(s, ', '.join(extras)) # depends on [control=['if'], data=[]]
else:
return s |
def to_dict(self, instance, d):
"""
Override the implementation from :class:`Text` by storing the formatted
value in the XML attribute instead of the character data.
If the value is :data:`None`, no element is generated.
"""
value = self.__get__(instance, type(instance))
if value == self.default:
return
d[self.tag] = self.type_.format(value) | def function[to_dict, parameter[self, instance, d]]:
constant[
Override the implementation from :class:`Text` by storing the formatted
value in the XML attribute instead of the character data.
If the value is :data:`None`, no element is generated.
]
variable[value] assign[=] call[name[self].__get__, parameter[name[instance], call[name[type], parameter[name[instance]]]]]
if compare[name[value] equal[==] name[self].default] begin[:]
return[None]
call[name[d]][name[self].tag] assign[=] call[name[self].type_.format, parameter[name[value]]] | keyword[def] identifier[to_dict] ( identifier[self] , identifier[instance] , identifier[d] ):
literal[string]
identifier[value] = identifier[self] . identifier[__get__] ( identifier[instance] , identifier[type] ( identifier[instance] ))
keyword[if] identifier[value] == identifier[self] . identifier[default] :
keyword[return]
identifier[d] [ identifier[self] . identifier[tag] ]= identifier[self] . identifier[type_] . identifier[format] ( identifier[value] ) | def to_dict(self, instance, d):
"""
Override the implementation from :class:`Text` by storing the formatted
value in the XML attribute instead of the character data.
If the value is :data:`None`, no element is generated.
"""
value = self.__get__(instance, type(instance))
if value == self.default:
return # depends on [control=['if'], data=[]]
d[self.tag] = self.type_.format(value) |
def nexson_frag_write_newick(out,
edges,
nodes,
otu_group,
label_key,
leaf_labels,
root_id,
needs_quotes_pattern=NEWICK_NEEDING_QUOTING,
ingroup_id=None,
bracket_ingroup=False,
with_edge_lengths=True):
"""`label_key` is a string (a key in the otu object) or a callable that takes two arguments:
the node, and the otu (which may be None for an internal node)
If `leaf_labels` is not None, it shoulr be a (list, dict) pair which will be filled. The list will
hold the order encountered,
and the dict will map name to index in the list
"""
unlabeled_counter = 0
curr_node_id = root_id
assert curr_node_id
curr_edge = None
curr_sib_list = []
curr_stack = []
going_tipward = True
while True:
if going_tipward:
outgoing_edges = edges.get(curr_node_id)
if outgoing_edges is None:
curr_node = nodes[curr_node_id]
assert curr_node_id is not None
assert curr_node_id is not None
unlabeled_counter = _write_newick_leaf_label(out,
curr_node_id,
curr_node,
otu_group,
label_key,
leaf_labels,
unlabeled_counter,
needs_quotes_pattern)
if with_edge_lengths:
_write_newick_edge_len(out, curr_edge)
going_tipward = False
else:
te = [(i, e) for i, e in outgoing_edges.items()]
te.sort() # produce a consistent rotation... Necessary?
if bracket_ingroup and (ingroup_id == curr_node_id):
out.write('[pre-ingroup-marker]')
out.write('(')
next_p = te.pop(0)
curr_stack.append((curr_edge, curr_node_id, curr_sib_list))
curr_edge, curr_sib_list = next_p[1], te
curr_node_id = curr_edge['@target']
if not going_tipward:
next_up_edge_id = None
while True:
if curr_sib_list:
out.write(',')
next_up_edge_id, next_up_edge = curr_sib_list.pop(0)
break
if curr_stack:
curr_edge, curr_node_id, curr_sib_list = curr_stack.pop(-1)
curr_node = nodes[curr_node_id]
out.write(')')
_write_newick_internal_label(out,
curr_node_id,
curr_node,
otu_group,
label_key,
needs_quotes_pattern)
if with_edge_lengths:
_write_newick_edge_len(out, curr_edge)
if bracket_ingroup and (ingroup_id == curr_node_id):
out.write('[post-ingroup-marker]')
else:
break
if next_up_edge_id is None:
break
curr_edge = next_up_edge
curr_node_id = curr_edge['@target']
going_tipward = True
out.write(';') | def function[nexson_frag_write_newick, parameter[out, edges, nodes, otu_group, label_key, leaf_labels, root_id, needs_quotes_pattern, ingroup_id, bracket_ingroup, with_edge_lengths]]:
constant[`label_key` is a string (a key in the otu object) or a callable that takes two arguments:
the node, and the otu (which may be None for an internal node)
If `leaf_labels` is not None, it shoulr be a (list, dict) pair which will be filled. The list will
hold the order encountered,
and the dict will map name to index in the list
]
variable[unlabeled_counter] assign[=] constant[0]
variable[curr_node_id] assign[=] name[root_id]
assert[name[curr_node_id]]
variable[curr_edge] assign[=] constant[None]
variable[curr_sib_list] assign[=] list[[]]
variable[curr_stack] assign[=] list[[]]
variable[going_tipward] assign[=] constant[True]
while constant[True] begin[:]
if name[going_tipward] begin[:]
variable[outgoing_edges] assign[=] call[name[edges].get, parameter[name[curr_node_id]]]
if compare[name[outgoing_edges] is constant[None]] begin[:]
variable[curr_node] assign[=] call[name[nodes]][name[curr_node_id]]
assert[compare[name[curr_node_id] is_not constant[None]]]
assert[compare[name[curr_node_id] is_not constant[None]]]
variable[unlabeled_counter] assign[=] call[name[_write_newick_leaf_label], parameter[name[out], name[curr_node_id], name[curr_node], name[otu_group], name[label_key], name[leaf_labels], name[unlabeled_counter], name[needs_quotes_pattern]]]
if name[with_edge_lengths] begin[:]
call[name[_write_newick_edge_len], parameter[name[out], name[curr_edge]]]
variable[going_tipward] assign[=] constant[False]
if <ast.UnaryOp object at 0x7da18f00d930> begin[:]
variable[next_up_edge_id] assign[=] constant[None]
while constant[True] begin[:]
if name[curr_sib_list] begin[:]
call[name[out].write, parameter[constant[,]]]
<ast.Tuple object at 0x7da18f00f400> assign[=] call[name[curr_sib_list].pop, parameter[constant[0]]]
break
if name[curr_stack] begin[:]
<ast.Tuple object at 0x7da18f00f7c0> assign[=] call[name[curr_stack].pop, parameter[<ast.UnaryOp object at 0x7da18f00f5e0>]]
variable[curr_node] assign[=] call[name[nodes]][name[curr_node_id]]
call[name[out].write, parameter[constant[)]]]
call[name[_write_newick_internal_label], parameter[name[out], name[curr_node_id], name[curr_node], name[otu_group], name[label_key], name[needs_quotes_pattern]]]
if name[with_edge_lengths] begin[:]
call[name[_write_newick_edge_len], parameter[name[out], name[curr_edge]]]
if <ast.BoolOp object at 0x7da18f00f220> begin[:]
call[name[out].write, parameter[constant[[post-ingroup-marker]]]]
if compare[name[next_up_edge_id] is constant[None]] begin[:]
break
variable[curr_edge] assign[=] name[next_up_edge]
variable[curr_node_id] assign[=] call[name[curr_edge]][constant[@target]]
variable[going_tipward] assign[=] constant[True]
call[name[out].write, parameter[constant[;]]] | keyword[def] identifier[nexson_frag_write_newick] ( identifier[out] ,
identifier[edges] ,
identifier[nodes] ,
identifier[otu_group] ,
identifier[label_key] ,
identifier[leaf_labels] ,
identifier[root_id] ,
identifier[needs_quotes_pattern] = identifier[NEWICK_NEEDING_QUOTING] ,
identifier[ingroup_id] = keyword[None] ,
identifier[bracket_ingroup] = keyword[False] ,
identifier[with_edge_lengths] = keyword[True] ):
literal[string]
identifier[unlabeled_counter] = literal[int]
identifier[curr_node_id] = identifier[root_id]
keyword[assert] identifier[curr_node_id]
identifier[curr_edge] = keyword[None]
identifier[curr_sib_list] =[]
identifier[curr_stack] =[]
identifier[going_tipward] = keyword[True]
keyword[while] keyword[True] :
keyword[if] identifier[going_tipward] :
identifier[outgoing_edges] = identifier[edges] . identifier[get] ( identifier[curr_node_id] )
keyword[if] identifier[outgoing_edges] keyword[is] keyword[None] :
identifier[curr_node] = identifier[nodes] [ identifier[curr_node_id] ]
keyword[assert] identifier[curr_node_id] keyword[is] keyword[not] keyword[None]
keyword[assert] identifier[curr_node_id] keyword[is] keyword[not] keyword[None]
identifier[unlabeled_counter] = identifier[_write_newick_leaf_label] ( identifier[out] ,
identifier[curr_node_id] ,
identifier[curr_node] ,
identifier[otu_group] ,
identifier[label_key] ,
identifier[leaf_labels] ,
identifier[unlabeled_counter] ,
identifier[needs_quotes_pattern] )
keyword[if] identifier[with_edge_lengths] :
identifier[_write_newick_edge_len] ( identifier[out] , identifier[curr_edge] )
identifier[going_tipward] = keyword[False]
keyword[else] :
identifier[te] =[( identifier[i] , identifier[e] ) keyword[for] identifier[i] , identifier[e] keyword[in] identifier[outgoing_edges] . identifier[items] ()]
identifier[te] . identifier[sort] ()
keyword[if] identifier[bracket_ingroup] keyword[and] ( identifier[ingroup_id] == identifier[curr_node_id] ):
identifier[out] . identifier[write] ( literal[string] )
identifier[out] . identifier[write] ( literal[string] )
identifier[next_p] = identifier[te] . identifier[pop] ( literal[int] )
identifier[curr_stack] . identifier[append] (( identifier[curr_edge] , identifier[curr_node_id] , identifier[curr_sib_list] ))
identifier[curr_edge] , identifier[curr_sib_list] = identifier[next_p] [ literal[int] ], identifier[te]
identifier[curr_node_id] = identifier[curr_edge] [ literal[string] ]
keyword[if] keyword[not] identifier[going_tipward] :
identifier[next_up_edge_id] = keyword[None]
keyword[while] keyword[True] :
keyword[if] identifier[curr_sib_list] :
identifier[out] . identifier[write] ( literal[string] )
identifier[next_up_edge_id] , identifier[next_up_edge] = identifier[curr_sib_list] . identifier[pop] ( literal[int] )
keyword[break]
keyword[if] identifier[curr_stack] :
identifier[curr_edge] , identifier[curr_node_id] , identifier[curr_sib_list] = identifier[curr_stack] . identifier[pop] (- literal[int] )
identifier[curr_node] = identifier[nodes] [ identifier[curr_node_id] ]
identifier[out] . identifier[write] ( literal[string] )
identifier[_write_newick_internal_label] ( identifier[out] ,
identifier[curr_node_id] ,
identifier[curr_node] ,
identifier[otu_group] ,
identifier[label_key] ,
identifier[needs_quotes_pattern] )
keyword[if] identifier[with_edge_lengths] :
identifier[_write_newick_edge_len] ( identifier[out] , identifier[curr_edge] )
keyword[if] identifier[bracket_ingroup] keyword[and] ( identifier[ingroup_id] == identifier[curr_node_id] ):
identifier[out] . identifier[write] ( literal[string] )
keyword[else] :
keyword[break]
keyword[if] identifier[next_up_edge_id] keyword[is] keyword[None] :
keyword[break]
identifier[curr_edge] = identifier[next_up_edge]
identifier[curr_node_id] = identifier[curr_edge] [ literal[string] ]
identifier[going_tipward] = keyword[True]
identifier[out] . identifier[write] ( literal[string] ) | def nexson_frag_write_newick(out, edges, nodes, otu_group, label_key, leaf_labels, root_id, needs_quotes_pattern=NEWICK_NEEDING_QUOTING, ingroup_id=None, bracket_ingroup=False, with_edge_lengths=True):
"""`label_key` is a string (a key in the otu object) or a callable that takes two arguments:
the node, and the otu (which may be None for an internal node)
If `leaf_labels` is not None, it shoulr be a (list, dict) pair which will be filled. The list will
hold the order encountered,
and the dict will map name to index in the list
"""
unlabeled_counter = 0
curr_node_id = root_id
assert curr_node_id
curr_edge = None
curr_sib_list = []
curr_stack = []
going_tipward = True
while True:
if going_tipward:
outgoing_edges = edges.get(curr_node_id)
if outgoing_edges is None:
curr_node = nodes[curr_node_id]
assert curr_node_id is not None
assert curr_node_id is not None
unlabeled_counter = _write_newick_leaf_label(out, curr_node_id, curr_node, otu_group, label_key, leaf_labels, unlabeled_counter, needs_quotes_pattern)
if with_edge_lengths:
_write_newick_edge_len(out, curr_edge) # depends on [control=['if'], data=[]]
going_tipward = False # depends on [control=['if'], data=[]]
else:
te = [(i, e) for (i, e) in outgoing_edges.items()]
te.sort() # produce a consistent rotation... Necessary?
if bracket_ingroup and ingroup_id == curr_node_id:
out.write('[pre-ingroup-marker]') # depends on [control=['if'], data=[]]
out.write('(')
next_p = te.pop(0)
curr_stack.append((curr_edge, curr_node_id, curr_sib_list))
(curr_edge, curr_sib_list) = (next_p[1], te)
curr_node_id = curr_edge['@target'] # depends on [control=['if'], data=[]]
if not going_tipward:
next_up_edge_id = None
while True:
if curr_sib_list:
out.write(',')
(next_up_edge_id, next_up_edge) = curr_sib_list.pop(0)
break # depends on [control=['if'], data=[]]
if curr_stack:
(curr_edge, curr_node_id, curr_sib_list) = curr_stack.pop(-1)
curr_node = nodes[curr_node_id]
out.write(')')
_write_newick_internal_label(out, curr_node_id, curr_node, otu_group, label_key, needs_quotes_pattern)
if with_edge_lengths:
_write_newick_edge_len(out, curr_edge) # depends on [control=['if'], data=[]]
if bracket_ingroup and ingroup_id == curr_node_id:
out.write('[post-ingroup-marker]') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]]
if next_up_edge_id is None:
break # depends on [control=['if'], data=[]]
curr_edge = next_up_edge
curr_node_id = curr_edge['@target']
going_tipward = True # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
out.write(';') |
def fetch_routing_table(self, address):
""" Fetch a routing table from a given router address.
:param address: router address
:return: a new RoutingTable instance or None if the given router is
currently unable to provide routing information
:raise ServiceUnavailable: if no writers are available
:raise ProtocolError: if the routing information received is unusable
"""
new_routing_info = self.fetch_routing_info(address)
if new_routing_info is None:
return None
# Parse routing info and count the number of each type of server
new_routing_table = RoutingTable.parse_routing_info(new_routing_info)
num_routers = len(new_routing_table.routers)
num_readers = len(new_routing_table.readers)
num_writers = len(new_routing_table.writers)
# No writers are available. This likely indicates a temporary state,
# such as leader switching, so we should not signal an error.
# When no writers available, then we flag we are reading in absence of writer
self.missing_writer = (num_writers == 0)
# No routers
if num_routers == 0:
raise RoutingProtocolError("No routing servers returned from server %r" % (address,))
# No readers
if num_readers == 0:
raise RoutingProtocolError("No read servers returned from server %r" % (address,))
# At least one of each is fine, so return this table
return new_routing_table | def function[fetch_routing_table, parameter[self, address]]:
constant[ Fetch a routing table from a given router address.
:param address: router address
:return: a new RoutingTable instance or None if the given router is
currently unable to provide routing information
:raise ServiceUnavailable: if no writers are available
:raise ProtocolError: if the routing information received is unusable
]
variable[new_routing_info] assign[=] call[name[self].fetch_routing_info, parameter[name[address]]]
if compare[name[new_routing_info] is constant[None]] begin[:]
return[constant[None]]
variable[new_routing_table] assign[=] call[name[RoutingTable].parse_routing_info, parameter[name[new_routing_info]]]
variable[num_routers] assign[=] call[name[len], parameter[name[new_routing_table].routers]]
variable[num_readers] assign[=] call[name[len], parameter[name[new_routing_table].readers]]
variable[num_writers] assign[=] call[name[len], parameter[name[new_routing_table].writers]]
name[self].missing_writer assign[=] compare[name[num_writers] equal[==] constant[0]]
if compare[name[num_routers] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da20c6c6290>
if compare[name[num_readers] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da20c6c7430>
return[name[new_routing_table]] | keyword[def] identifier[fetch_routing_table] ( identifier[self] , identifier[address] ):
literal[string]
identifier[new_routing_info] = identifier[self] . identifier[fetch_routing_info] ( identifier[address] )
keyword[if] identifier[new_routing_info] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[new_routing_table] = identifier[RoutingTable] . identifier[parse_routing_info] ( identifier[new_routing_info] )
identifier[num_routers] = identifier[len] ( identifier[new_routing_table] . identifier[routers] )
identifier[num_readers] = identifier[len] ( identifier[new_routing_table] . identifier[readers] )
identifier[num_writers] = identifier[len] ( identifier[new_routing_table] . identifier[writers] )
identifier[self] . identifier[missing_writer] =( identifier[num_writers] == literal[int] )
keyword[if] identifier[num_routers] == literal[int] :
keyword[raise] identifier[RoutingProtocolError] ( literal[string] %( identifier[address] ,))
keyword[if] identifier[num_readers] == literal[int] :
keyword[raise] identifier[RoutingProtocolError] ( literal[string] %( identifier[address] ,))
keyword[return] identifier[new_routing_table] | def fetch_routing_table(self, address):
""" Fetch a routing table from a given router address.
:param address: router address
:return: a new RoutingTable instance or None if the given router is
currently unable to provide routing information
:raise ServiceUnavailable: if no writers are available
:raise ProtocolError: if the routing information received is unusable
"""
new_routing_info = self.fetch_routing_info(address)
if new_routing_info is None:
return None # depends on [control=['if'], data=[]]
# Parse routing info and count the number of each type of server
new_routing_table = RoutingTable.parse_routing_info(new_routing_info)
num_routers = len(new_routing_table.routers)
num_readers = len(new_routing_table.readers)
num_writers = len(new_routing_table.writers)
# No writers are available. This likely indicates a temporary state,
# such as leader switching, so we should not signal an error.
# When no writers available, then we flag we are reading in absence of writer
self.missing_writer = num_writers == 0
# No routers
if num_routers == 0:
raise RoutingProtocolError('No routing servers returned from server %r' % (address,)) # depends on [control=['if'], data=[]]
# No readers
if num_readers == 0:
raise RoutingProtocolError('No read servers returned from server %r' % (address,)) # depends on [control=['if'], data=[]]
# At least one of each is fine, so return this table
return new_routing_table |
def apply_operation(self, symmop, fractional=False):
"""
Apply a symmetry operation to the structure and return the new
structure. The lattice is operated by the rotation matrix only.
Coords are operated in full and then transformed to the new lattice.
Args:
symmop (SymmOp): Symmetry operation to apply.
fractional (bool): Whether the symmetry operation is applied in
fractional space. Defaults to False, i.e., symmetry operation
is applied in cartesian coordinates.
"""
if not fractional:
self._lattice = Lattice([symmop.apply_rotation_only(row)
for row in self._lattice.matrix])
def operate_site(site):
new_cart = symmop.operate(site.coords)
new_frac = self._lattice.get_fractional_coords(new_cart)
return PeriodicSite(site.species, new_frac,
self._lattice,
properties=site.properties)
else:
new_latt = np.dot(symmop.rotation_matrix, self._lattice.matrix)
self._lattice = Lattice(new_latt)
def operate_site(site):
return PeriodicSite(site.species,
symmop.operate(site.frac_coords),
self._lattice,
properties=site.properties)
self._sites = [operate_site(s) for s in self._sites] | def function[apply_operation, parameter[self, symmop, fractional]]:
constant[
Apply a symmetry operation to the structure and return the new
structure. The lattice is operated by the rotation matrix only.
Coords are operated in full and then transformed to the new lattice.
Args:
symmop (SymmOp): Symmetry operation to apply.
fractional (bool): Whether the symmetry operation is applied in
fractional space. Defaults to False, i.e., symmetry operation
is applied in cartesian coordinates.
]
if <ast.UnaryOp object at 0x7da204565990> begin[:]
name[self]._lattice assign[=] call[name[Lattice], parameter[<ast.ListComp object at 0x7da2041da770>]]
def function[operate_site, parameter[site]]:
variable[new_cart] assign[=] call[name[symmop].operate, parameter[name[site].coords]]
variable[new_frac] assign[=] call[name[self]._lattice.get_fractional_coords, parameter[name[new_cart]]]
return[call[name[PeriodicSite], parameter[name[site].species, name[new_frac], name[self]._lattice]]]
name[self]._sites assign[=] <ast.ListComp object at 0x7da18dc99900> | keyword[def] identifier[apply_operation] ( identifier[self] , identifier[symmop] , identifier[fractional] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[fractional] :
identifier[self] . identifier[_lattice] = identifier[Lattice] ([ identifier[symmop] . identifier[apply_rotation_only] ( identifier[row] )
keyword[for] identifier[row] keyword[in] identifier[self] . identifier[_lattice] . identifier[matrix] ])
keyword[def] identifier[operate_site] ( identifier[site] ):
identifier[new_cart] = identifier[symmop] . identifier[operate] ( identifier[site] . identifier[coords] )
identifier[new_frac] = identifier[self] . identifier[_lattice] . identifier[get_fractional_coords] ( identifier[new_cart] )
keyword[return] identifier[PeriodicSite] ( identifier[site] . identifier[species] , identifier[new_frac] ,
identifier[self] . identifier[_lattice] ,
identifier[properties] = identifier[site] . identifier[properties] )
keyword[else] :
identifier[new_latt] = identifier[np] . identifier[dot] ( identifier[symmop] . identifier[rotation_matrix] , identifier[self] . identifier[_lattice] . identifier[matrix] )
identifier[self] . identifier[_lattice] = identifier[Lattice] ( identifier[new_latt] )
keyword[def] identifier[operate_site] ( identifier[site] ):
keyword[return] identifier[PeriodicSite] ( identifier[site] . identifier[species] ,
identifier[symmop] . identifier[operate] ( identifier[site] . identifier[frac_coords] ),
identifier[self] . identifier[_lattice] ,
identifier[properties] = identifier[site] . identifier[properties] )
identifier[self] . identifier[_sites] =[ identifier[operate_site] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[self] . identifier[_sites] ] | def apply_operation(self, symmop, fractional=False):
"""
Apply a symmetry operation to the structure and return the new
structure. The lattice is operated by the rotation matrix only.
Coords are operated in full and then transformed to the new lattice.
Args:
symmop (SymmOp): Symmetry operation to apply.
fractional (bool): Whether the symmetry operation is applied in
fractional space. Defaults to False, i.e., symmetry operation
is applied in cartesian coordinates.
"""
if not fractional:
self._lattice = Lattice([symmop.apply_rotation_only(row) for row in self._lattice.matrix])
def operate_site(site):
new_cart = symmop.operate(site.coords)
new_frac = self._lattice.get_fractional_coords(new_cart)
return PeriodicSite(site.species, new_frac, self._lattice, properties=site.properties) # depends on [control=['if'], data=[]]
else:
new_latt = np.dot(symmop.rotation_matrix, self._lattice.matrix)
self._lattice = Lattice(new_latt)
def operate_site(site):
return PeriodicSite(site.species, symmop.operate(site.frac_coords), self._lattice, properties=site.properties)
self._sites = [operate_site(s) for s in self._sites] |
def _equals(v1_indices, v1_values, v2_indices, v2_values):
"""
Check equality between sparse/dense vectors,
v1_indices and v2_indices assume to be strictly increasing.
"""
v1_size = len(v1_values)
v2_size = len(v2_values)
k1 = 0
k2 = 0
all_equal = True
while all_equal:
while k1 < v1_size and v1_values[k1] == 0:
k1 += 1
while k2 < v2_size and v2_values[k2] == 0:
k2 += 1
if k1 >= v1_size or k2 >= v2_size:
return k1 >= v1_size and k2 >= v2_size
all_equal = v1_indices[k1] == v2_indices[k2] and v1_values[k1] == v2_values[k2]
k1 += 1
k2 += 1
return all_equal | def function[_equals, parameter[v1_indices, v1_values, v2_indices, v2_values]]:
constant[
Check equality between sparse/dense vectors,
v1_indices and v2_indices assume to be strictly increasing.
]
variable[v1_size] assign[=] call[name[len], parameter[name[v1_values]]]
variable[v2_size] assign[=] call[name[len], parameter[name[v2_values]]]
variable[k1] assign[=] constant[0]
variable[k2] assign[=] constant[0]
variable[all_equal] assign[=] constant[True]
while name[all_equal] begin[:]
while <ast.BoolOp object at 0x7da1b20b4d60> begin[:]
<ast.AugAssign object at 0x7da1b20b48e0>
while <ast.BoolOp object at 0x7da1b20b56f0> begin[:]
<ast.AugAssign object at 0x7da1b20b6500>
if <ast.BoolOp object at 0x7da18f58d1e0> begin[:]
return[<ast.BoolOp object at 0x7da18f58cbe0>]
variable[all_equal] assign[=] <ast.BoolOp object at 0x7da18f58f250>
<ast.AugAssign object at 0x7da18f58f0d0>
<ast.AugAssign object at 0x7da18f58c6d0>
return[name[all_equal]] | keyword[def] identifier[_equals] ( identifier[v1_indices] , identifier[v1_values] , identifier[v2_indices] , identifier[v2_values] ):
literal[string]
identifier[v1_size] = identifier[len] ( identifier[v1_values] )
identifier[v2_size] = identifier[len] ( identifier[v2_values] )
identifier[k1] = literal[int]
identifier[k2] = literal[int]
identifier[all_equal] = keyword[True]
keyword[while] identifier[all_equal] :
keyword[while] identifier[k1] < identifier[v1_size] keyword[and] identifier[v1_values] [ identifier[k1] ]== literal[int] :
identifier[k1] += literal[int]
keyword[while] identifier[k2] < identifier[v2_size] keyword[and] identifier[v2_values] [ identifier[k2] ]== literal[int] :
identifier[k2] += literal[int]
keyword[if] identifier[k1] >= identifier[v1_size] keyword[or] identifier[k2] >= identifier[v2_size] :
keyword[return] identifier[k1] >= identifier[v1_size] keyword[and] identifier[k2] >= identifier[v2_size]
identifier[all_equal] = identifier[v1_indices] [ identifier[k1] ]== identifier[v2_indices] [ identifier[k2] ] keyword[and] identifier[v1_values] [ identifier[k1] ]== identifier[v2_values] [ identifier[k2] ]
identifier[k1] += literal[int]
identifier[k2] += literal[int]
keyword[return] identifier[all_equal] | def _equals(v1_indices, v1_values, v2_indices, v2_values):
"""
Check equality between sparse/dense vectors,
v1_indices and v2_indices assume to be strictly increasing.
"""
v1_size = len(v1_values)
v2_size = len(v2_values)
k1 = 0
k2 = 0
all_equal = True
while all_equal:
while k1 < v1_size and v1_values[k1] == 0:
k1 += 1 # depends on [control=['while'], data=[]]
while k2 < v2_size and v2_values[k2] == 0:
k2 += 1 # depends on [control=['while'], data=[]]
if k1 >= v1_size or k2 >= v2_size:
return k1 >= v1_size and k2 >= v2_size # depends on [control=['if'], data=[]]
all_equal = v1_indices[k1] == v2_indices[k2] and v1_values[k1] == v2_values[k2]
k1 += 1
k2 += 1 # depends on [control=['while'], data=[]]
return all_equal |
def GetDirections(self, origin, destination, sensor = False, mode = None, waypoints = None, alternatives = None, avoid = None, language = None, units = None,
region = None, departure_time = None, arrival_time = None):
'''Get Directions Service
Pls refer to the Google Maps Web API for the details of the remained parameters
'''
params = {
'origin': origin,
'destination': destination,
'sensor': str(sensor).lower()
}
if mode:
params['mode'] = mode
if waypoints:
params['waypoints'] = waypoints
if alternatives:
params['alternatives'] = alternatives
if avoid:
params['avoid'] = avoid
if language:
params['language'] = language
if units:
params['units'] = units
if region:
params['region'] = region
if departure_time:
params['departure_time'] = departure_time
if arrival_time:
params['arrival_time'] = arrival_time
if not self.premier:
url = self.get_url(params)
else:
url = self.get_signed_url(params)
return self.GetService_url(url) | def function[GetDirections, parameter[self, origin, destination, sensor, mode, waypoints, alternatives, avoid, language, units, region, departure_time, arrival_time]]:
constant[Get Directions Service
Pls refer to the Google Maps Web API for the details of the remained parameters
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da2041d80d0>, <ast.Constant object at 0x7da2041db7f0>, <ast.Constant object at 0x7da2041d8be0>], [<ast.Name object at 0x7da2041d9510>, <ast.Name object at 0x7da2041d8640>, <ast.Call object at 0x7da2041d8850>]]
if name[mode] begin[:]
call[name[params]][constant[mode]] assign[=] name[mode]
if name[waypoints] begin[:]
call[name[params]][constant[waypoints]] assign[=] name[waypoints]
if name[alternatives] begin[:]
call[name[params]][constant[alternatives]] assign[=] name[alternatives]
if name[avoid] begin[:]
call[name[params]][constant[avoid]] assign[=] name[avoid]
if name[language] begin[:]
call[name[params]][constant[language]] assign[=] name[language]
if name[units] begin[:]
call[name[params]][constant[units]] assign[=] name[units]
if name[region] begin[:]
call[name[params]][constant[region]] assign[=] name[region]
if name[departure_time] begin[:]
call[name[params]][constant[departure_time]] assign[=] name[departure_time]
if name[arrival_time] begin[:]
call[name[params]][constant[arrival_time]] assign[=] name[arrival_time]
if <ast.UnaryOp object at 0x7da2041dae90> begin[:]
variable[url] assign[=] call[name[self].get_url, parameter[name[params]]]
return[call[name[self].GetService_url, parameter[name[url]]]] | keyword[def] identifier[GetDirections] ( identifier[self] , identifier[origin] , identifier[destination] , identifier[sensor] = keyword[False] , identifier[mode] = keyword[None] , identifier[waypoints] = keyword[None] , identifier[alternatives] = keyword[None] , identifier[avoid] = keyword[None] , identifier[language] = keyword[None] , identifier[units] = keyword[None] ,
identifier[region] = keyword[None] , identifier[departure_time] = keyword[None] , identifier[arrival_time] = keyword[None] ):
literal[string]
identifier[params] ={
literal[string] : identifier[origin] ,
literal[string] : identifier[destination] ,
literal[string] : identifier[str] ( identifier[sensor] ). identifier[lower] ()
}
keyword[if] identifier[mode] :
identifier[params] [ literal[string] ]= identifier[mode]
keyword[if] identifier[waypoints] :
identifier[params] [ literal[string] ]= identifier[waypoints]
keyword[if] identifier[alternatives] :
identifier[params] [ literal[string] ]= identifier[alternatives]
keyword[if] identifier[avoid] :
identifier[params] [ literal[string] ]= identifier[avoid]
keyword[if] identifier[language] :
identifier[params] [ literal[string] ]= identifier[language]
keyword[if] identifier[units] :
identifier[params] [ literal[string] ]= identifier[units]
keyword[if] identifier[region] :
identifier[params] [ literal[string] ]= identifier[region]
keyword[if] identifier[departure_time] :
identifier[params] [ literal[string] ]= identifier[departure_time]
keyword[if] identifier[arrival_time] :
identifier[params] [ literal[string] ]= identifier[arrival_time]
keyword[if] keyword[not] identifier[self] . identifier[premier] :
identifier[url] = identifier[self] . identifier[get_url] ( identifier[params] )
keyword[else] :
identifier[url] = identifier[self] . identifier[get_signed_url] ( identifier[params] )
keyword[return] identifier[self] . identifier[GetService_url] ( identifier[url] ) | def GetDirections(self, origin, destination, sensor=False, mode=None, waypoints=None, alternatives=None, avoid=None, language=None, units=None, region=None, departure_time=None, arrival_time=None):
"""Get Directions Service
Pls refer to the Google Maps Web API for the details of the remained parameters
"""
params = {'origin': origin, 'destination': destination, 'sensor': str(sensor).lower()}
if mode:
params['mode'] = mode # depends on [control=['if'], data=[]]
if waypoints:
params['waypoints'] = waypoints # depends on [control=['if'], data=[]]
if alternatives:
params['alternatives'] = alternatives # depends on [control=['if'], data=[]]
if avoid:
params['avoid'] = avoid # depends on [control=['if'], data=[]]
if language:
params['language'] = language # depends on [control=['if'], data=[]]
if units:
params['units'] = units # depends on [control=['if'], data=[]]
if region:
params['region'] = region # depends on [control=['if'], data=[]]
if departure_time:
params['departure_time'] = departure_time # depends on [control=['if'], data=[]]
if arrival_time:
params['arrival_time'] = arrival_time # depends on [control=['if'], data=[]]
if not self.premier:
url = self.get_url(params) # depends on [control=['if'], data=[]]
else:
url = self.get_signed_url(params)
return self.GetService_url(url) |
def get_api_key_with_prefix(self, identifier):
"""
Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if self.api_key.get(identifier) and self.api_key_prefix.get(identifier):
return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier]
elif self.api_key.get(identifier):
return self.api_key[identifier] | def function[get_api_key_with_prefix, parameter[self, identifier]]:
constant[
Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
]
if <ast.BoolOp object at 0x7da1b1f608b0> begin[:]
return[binary_operation[binary_operation[call[name[self].api_key_prefix][name[identifier]] + constant[ ]] + call[name[self].api_key][name[identifier]]]] | keyword[def] identifier[get_api_key_with_prefix] ( identifier[self] , identifier[identifier] ):
literal[string]
keyword[if] identifier[self] . identifier[api_key] . identifier[get] ( identifier[identifier] ) keyword[and] identifier[self] . identifier[api_key_prefix] . identifier[get] ( identifier[identifier] ):
keyword[return] identifier[self] . identifier[api_key_prefix] [ identifier[identifier] ]+ literal[string] + identifier[self] . identifier[api_key] [ identifier[identifier] ]
keyword[elif] identifier[self] . identifier[api_key] . identifier[get] ( identifier[identifier] ):
keyword[return] identifier[self] . identifier[api_key] [ identifier[identifier] ] | def get_api_key_with_prefix(self, identifier):
"""
Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if self.api_key.get(identifier) and self.api_key_prefix.get(identifier):
return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier] # depends on [control=['if'], data=[]]
elif self.api_key.get(identifier):
return self.api_key[identifier] # depends on [control=['if'], data=[]] |
def instantiate(self, parallel_envs, seed=0, preset='default') -> VecEnv:
""" Create vectorized environments """
envs = DummyVecEnv([self._creation_function(i, seed, preset) for i in range(parallel_envs)])
if self.frame_history is not None:
envs = VecFrameStack(envs, self.frame_history)
return envs | def function[instantiate, parameter[self, parallel_envs, seed, preset]]:
constant[ Create vectorized environments ]
variable[envs] assign[=] call[name[DummyVecEnv], parameter[<ast.ListComp object at 0x7da18bcc9450>]]
if compare[name[self].frame_history is_not constant[None]] begin[:]
variable[envs] assign[=] call[name[VecFrameStack], parameter[name[envs], name[self].frame_history]]
return[name[envs]] | keyword[def] identifier[instantiate] ( identifier[self] , identifier[parallel_envs] , identifier[seed] = literal[int] , identifier[preset] = literal[string] )-> identifier[VecEnv] :
literal[string]
identifier[envs] = identifier[DummyVecEnv] ([ identifier[self] . identifier[_creation_function] ( identifier[i] , identifier[seed] , identifier[preset] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[parallel_envs] )])
keyword[if] identifier[self] . identifier[frame_history] keyword[is] keyword[not] keyword[None] :
identifier[envs] = identifier[VecFrameStack] ( identifier[envs] , identifier[self] . identifier[frame_history] )
keyword[return] identifier[envs] | def instantiate(self, parallel_envs, seed=0, preset='default') -> VecEnv:
""" Create vectorized environments """
envs = DummyVecEnv([self._creation_function(i, seed, preset) for i in range(parallel_envs)])
if self.frame_history is not None:
envs = VecFrameStack(envs, self.frame_history) # depends on [control=['if'], data=[]]
return envs |
def _load_embedding_txt(self, pretrained_file_path, elem_delim, encoding='utf8'):
"""Load embedding vectors from a pre-trained token embedding file.
For every unknown token, if its representation `self.unknown_token` is encountered in the
pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token
embedding vector loaded from the file; otherwise, index 0 of `self.idx_to_vec` maps to the
text embedding vector initialized by `self._init_unknown_vec`.
If a token is encountered multiple times in the pre-trained text embedding file, only the
first-encountered token embedding vector will be loaded and the rest will be skipped.
"""
vec_len = None
all_elems = []
tokens = set()
loaded_unknown_vec = None
with io.open(pretrained_file_path, 'rb') as f:
for line_num, line in enumerate(f):
try:
line = line.decode(encoding)
except ValueError:
warnings.warn('line {} in {}: failed to decode. Skipping.'
.format(line_num, pretrained_file_path))
continue
elems = line.rstrip().split(elem_delim)
assert len(elems) > 1, 'line {} in {}: unexpected data format.'.format(
line_num, pretrained_file_path)
token, elems = elems[0], [float(i) for i in elems[1:]]
if token == self.unknown_token and loaded_unknown_vec is None:
loaded_unknown_vec = elems
tokens.add(self.unknown_token)
elif token in tokens:
warnings.warn('line {} in {}: duplicate embedding found for '
'token "{}". Skipped.'.format(line_num, pretrained_file_path,
token))
elif len(elems) == 1 and line_num == 0:
warnings.warn('line {} in {}: skipped likely header line.'
.format(line_num, pretrained_file_path))
else:
if not vec_len:
vec_len = len(elems)
if self.unknown_token:
# Reserve a vector slot for the unknown token at the very beggining
# because the unknown token index is 0.
all_elems.extend([0] * vec_len)
else:
assert len(elems) == vec_len, \
'line {} in {}: found vector of inconsistent dimension for token ' \
'"{}". expected dim: {}, found: {}'.format(line_num,
pretrained_file_path,
token, vec_len, len(elems))
all_elems.extend(elems)
self._idx_to_token.append(token)
self._token_to_idx[token] = len(self._idx_to_token) - 1
tokens.add(token)
self._idx_to_vec = nd.array(all_elems).reshape((-1, vec_len))
if self.unknown_token:
if loaded_unknown_vec is None:
self._idx_to_vec[C.UNK_IDX] = self._init_unknown_vec(shape=vec_len)
else:
self._idx_to_vec[C.UNK_IDX] = nd.array(loaded_unknown_vec) | def function[_load_embedding_txt, parameter[self, pretrained_file_path, elem_delim, encoding]]:
constant[Load embedding vectors from a pre-trained token embedding file.
For every unknown token, if its representation `self.unknown_token` is encountered in the
pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token
embedding vector loaded from the file; otherwise, index 0 of `self.idx_to_vec` maps to the
text embedding vector initialized by `self._init_unknown_vec`.
If a token is encountered multiple times in the pre-trained text embedding file, only the
first-encountered token embedding vector will be loaded and the rest will be skipped.
]
variable[vec_len] assign[=] constant[None]
variable[all_elems] assign[=] list[[]]
variable[tokens] assign[=] call[name[set], parameter[]]
variable[loaded_unknown_vec] assign[=] constant[None]
with call[name[io].open, parameter[name[pretrained_file_path], constant[rb]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18f721d50>, <ast.Name object at 0x7da18f721ae0>]]] in starred[call[name[enumerate], parameter[name[f]]]] begin[:]
<ast.Try object at 0x7da18f723310>
variable[elems] assign[=] call[call[name[line].rstrip, parameter[]].split, parameter[name[elem_delim]]]
assert[compare[call[name[len], parameter[name[elems]]] greater[>] constant[1]]]
<ast.Tuple object at 0x7da18f720670> assign[=] tuple[[<ast.Subscript object at 0x7da18f721c30>, <ast.ListComp object at 0x7da18f721de0>]]
if <ast.BoolOp object at 0x7da18f7210f0> begin[:]
variable[loaded_unknown_vec] assign[=] name[elems]
call[name[tokens].add, parameter[name[self].unknown_token]]
name[self]._idx_to_vec assign[=] call[call[name[nd].array, parameter[name[all_elems]]].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da2041d9420>, <ast.Name object at 0x7da2041dbfd0>]]]]
if name[self].unknown_token begin[:]
if compare[name[loaded_unknown_vec] is constant[None]] begin[:]
call[name[self]._idx_to_vec][name[C].UNK_IDX] assign[=] call[name[self]._init_unknown_vec, parameter[]] | keyword[def] identifier[_load_embedding_txt] ( identifier[self] , identifier[pretrained_file_path] , identifier[elem_delim] , identifier[encoding] = literal[string] ):
literal[string]
identifier[vec_len] = keyword[None]
identifier[all_elems] =[]
identifier[tokens] = identifier[set] ()
identifier[loaded_unknown_vec] = keyword[None]
keyword[with] identifier[io] . identifier[open] ( identifier[pretrained_file_path] , literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[line_num] , identifier[line] keyword[in] identifier[enumerate] ( identifier[f] ):
keyword[try] :
identifier[line] = identifier[line] . identifier[decode] ( identifier[encoding] )
keyword[except] identifier[ValueError] :
identifier[warnings] . identifier[warn] ( literal[string]
. identifier[format] ( identifier[line_num] , identifier[pretrained_file_path] ))
keyword[continue]
identifier[elems] = identifier[line] . identifier[rstrip] (). identifier[split] ( identifier[elem_delim] )
keyword[assert] identifier[len] ( identifier[elems] )> literal[int] , literal[string] . identifier[format] (
identifier[line_num] , identifier[pretrained_file_path] )
identifier[token] , identifier[elems] = identifier[elems] [ literal[int] ],[ identifier[float] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[elems] [ literal[int] :]]
keyword[if] identifier[token] == identifier[self] . identifier[unknown_token] keyword[and] identifier[loaded_unknown_vec] keyword[is] keyword[None] :
identifier[loaded_unknown_vec] = identifier[elems]
identifier[tokens] . identifier[add] ( identifier[self] . identifier[unknown_token] )
keyword[elif] identifier[token] keyword[in] identifier[tokens] :
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] . identifier[format] ( identifier[line_num] , identifier[pretrained_file_path] ,
identifier[token] ))
keyword[elif] identifier[len] ( identifier[elems] )== literal[int] keyword[and] identifier[line_num] == literal[int] :
identifier[warnings] . identifier[warn] ( literal[string]
. identifier[format] ( identifier[line_num] , identifier[pretrained_file_path] ))
keyword[else] :
keyword[if] keyword[not] identifier[vec_len] :
identifier[vec_len] = identifier[len] ( identifier[elems] )
keyword[if] identifier[self] . identifier[unknown_token] :
identifier[all_elems] . identifier[extend] ([ literal[int] ]* identifier[vec_len] )
keyword[else] :
keyword[assert] identifier[len] ( identifier[elems] )== identifier[vec_len] , literal[string] literal[string] . identifier[format] ( identifier[line_num] ,
identifier[pretrained_file_path] ,
identifier[token] , identifier[vec_len] , identifier[len] ( identifier[elems] ))
identifier[all_elems] . identifier[extend] ( identifier[elems] )
identifier[self] . identifier[_idx_to_token] . identifier[append] ( identifier[token] )
identifier[self] . identifier[_token_to_idx] [ identifier[token] ]= identifier[len] ( identifier[self] . identifier[_idx_to_token] )- literal[int]
identifier[tokens] . identifier[add] ( identifier[token] )
identifier[self] . identifier[_idx_to_vec] = identifier[nd] . identifier[array] ( identifier[all_elems] ). identifier[reshape] ((- literal[int] , identifier[vec_len] ))
keyword[if] identifier[self] . identifier[unknown_token] :
keyword[if] identifier[loaded_unknown_vec] keyword[is] keyword[None] :
identifier[self] . identifier[_idx_to_vec] [ identifier[C] . identifier[UNK_IDX] ]= identifier[self] . identifier[_init_unknown_vec] ( identifier[shape] = identifier[vec_len] )
keyword[else] :
identifier[self] . identifier[_idx_to_vec] [ identifier[C] . identifier[UNK_IDX] ]= identifier[nd] . identifier[array] ( identifier[loaded_unknown_vec] ) | def _load_embedding_txt(self, pretrained_file_path, elem_delim, encoding='utf8'):
"""Load embedding vectors from a pre-trained token embedding file.
For every unknown token, if its representation `self.unknown_token` is encountered in the
pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token
embedding vector loaded from the file; otherwise, index 0 of `self.idx_to_vec` maps to the
text embedding vector initialized by `self._init_unknown_vec`.
If a token is encountered multiple times in the pre-trained text embedding file, only the
first-encountered token embedding vector will be loaded and the rest will be skipped.
"""
vec_len = None
all_elems = []
tokens = set()
loaded_unknown_vec = None
with io.open(pretrained_file_path, 'rb') as f:
for (line_num, line) in enumerate(f):
try:
line = line.decode(encoding) # depends on [control=['try'], data=[]]
except ValueError:
warnings.warn('line {} in {}: failed to decode. Skipping.'.format(line_num, pretrained_file_path))
continue # depends on [control=['except'], data=[]]
elems = line.rstrip().split(elem_delim)
assert len(elems) > 1, 'line {} in {}: unexpected data format.'.format(line_num, pretrained_file_path)
(token, elems) = (elems[0], [float(i) for i in elems[1:]])
if token == self.unknown_token and loaded_unknown_vec is None:
loaded_unknown_vec = elems
tokens.add(self.unknown_token) # depends on [control=['if'], data=[]]
elif token in tokens:
warnings.warn('line {} in {}: duplicate embedding found for token "{}". Skipped.'.format(line_num, pretrained_file_path, token)) # depends on [control=['if'], data=['token']]
elif len(elems) == 1 and line_num == 0:
warnings.warn('line {} in {}: skipped likely header line.'.format(line_num, pretrained_file_path)) # depends on [control=['if'], data=[]]
else:
if not vec_len:
vec_len = len(elems)
if self.unknown_token:
# Reserve a vector slot for the unknown token at the very beggining
# because the unknown token index is 0.
all_elems.extend([0] * vec_len) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
assert len(elems) == vec_len, 'line {} in {}: found vector of inconsistent dimension for token "{}". expected dim: {}, found: {}'.format(line_num, pretrained_file_path, token, vec_len, len(elems))
all_elems.extend(elems)
self._idx_to_token.append(token)
self._token_to_idx[token] = len(self._idx_to_token) - 1
tokens.add(token) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['f']]
self._idx_to_vec = nd.array(all_elems).reshape((-1, vec_len))
if self.unknown_token:
if loaded_unknown_vec is None:
self._idx_to_vec[C.UNK_IDX] = self._init_unknown_vec(shape=vec_len) # depends on [control=['if'], data=[]]
else:
self._idx_to_vec[C.UNK_IDX] = nd.array(loaded_unknown_vec) # depends on [control=['if'], data=[]] |
def convert_convtranspose(params, w_name, scope_name, inputs, layers, weights, names):
"""
Convert transposed convolution layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting transposed convolution ...')
if names == 'short':
tf_name = 'C' + random_string(7)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
bias_name = '{0}.bias'.format(w_name)
weights_name = '{0}.weight'.format(w_name)
if len(weights[weights_name].numpy().shape) == 4:
W = weights[weights_name].numpy().transpose(2, 3, 1, 0)
height, width, n_filters, channels = W.shape
n_groups = params['group']
if n_groups > 1:
raise AssertionError('Cannot convert conv1d with groups != 1')
if params['dilations'][0] > 1:
raise AssertionError('Cannot convert conv1d with dilation_rate != 1')
if bias_name in weights:
biases = weights[bias_name].numpy()
has_bias = True
else:
biases = None
has_bias = False
input_name = inputs[0]
if has_bias:
weights = [W, biases]
else:
weights = [W]
conv = keras.layers.Conv2DTranspose(
filters=n_filters,
kernel_size=(height, width),
strides=(params['strides'][0], params['strides'][1]),
padding='valid',
output_padding=0,
weights=weights,
use_bias=has_bias,
activation=None,
dilation_rate=params['dilations'][0],
bias_initializer='zeros', kernel_initializer='zeros',
name=tf_name
)
layers[scope_name] = conv(layers[input_name])
# Magic ad-hoc.
# See the Keras issue: https://github.com/keras-team/keras/issues/6777
layers[scope_name].set_shape(layers[scope_name]._keras_shape)
pads = params['pads']
if pads[0] > 0:
assert(len(pads) == 2 or (pads[2] == pads[0] and pads[3] == pads[1]))
crop = keras.layers.Cropping2D(
pads[:2],
name=tf_name + '_crop'
)
layers[scope_name] = crop(layers[scope_name])
else:
raise AssertionError('Layer is not supported for now') | def function[convert_convtranspose, parameter[params, w_name, scope_name, inputs, layers, weights, names]]:
constant[
Convert transposed convolution layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
]
call[name[print], parameter[constant[Converting transposed convolution ...]]]
if compare[name[names] equal[==] constant[short]] begin[:]
variable[tf_name] assign[=] binary_operation[constant[C] + call[name[random_string], parameter[constant[7]]]]
variable[bias_name] assign[=] call[constant[{0}.bias].format, parameter[name[w_name]]]
variable[weights_name] assign[=] call[constant[{0}.weight].format, parameter[name[w_name]]]
if compare[call[name[len], parameter[call[call[name[weights]][name[weights_name]].numpy, parameter[]].shape]] equal[==] constant[4]] begin[:]
variable[W] assign[=] call[call[call[name[weights]][name[weights_name]].numpy, parameter[]].transpose, parameter[constant[2], constant[3], constant[1], constant[0]]]
<ast.Tuple object at 0x7da1b013eaa0> assign[=] name[W].shape
variable[n_groups] assign[=] call[name[params]][constant[group]]
if compare[name[n_groups] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da1b013c6d0>
if compare[call[call[name[params]][constant[dilations]]][constant[0]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da1b013eef0>
if compare[name[bias_name] in name[weights]] begin[:]
variable[biases] assign[=] call[call[name[weights]][name[bias_name]].numpy, parameter[]]
variable[has_bias] assign[=] constant[True]
variable[input_name] assign[=] call[name[inputs]][constant[0]]
if name[has_bias] begin[:]
variable[weights] assign[=] list[[<ast.Name object at 0x7da1b013f2b0>, <ast.Name object at 0x7da1b013fe50>]]
variable[conv] assign[=] call[name[keras].layers.Conv2DTranspose, parameter[]]
call[name[layers]][name[scope_name]] assign[=] call[name[conv], parameter[call[name[layers]][name[input_name]]]]
call[call[name[layers]][name[scope_name]].set_shape, parameter[call[name[layers]][name[scope_name]]._keras_shape]]
variable[pads] assign[=] call[name[params]][constant[pads]]
if compare[call[name[pads]][constant[0]] greater[>] constant[0]] begin[:]
assert[<ast.BoolOp object at 0x7da1b013f4f0>]
variable[crop] assign[=] call[name[keras].layers.Cropping2D, parameter[call[name[pads]][<ast.Slice object at 0x7da1b01200d0>]]]
call[name[layers]][name[scope_name]] assign[=] call[name[crop], parameter[call[name[layers]][name[scope_name]]]] | keyword[def] identifier[convert_convtranspose] ( identifier[params] , identifier[w_name] , identifier[scope_name] , identifier[inputs] , identifier[layers] , identifier[weights] , identifier[names] ):
literal[string]
identifier[print] ( literal[string] )
keyword[if] identifier[names] == literal[string] :
identifier[tf_name] = literal[string] + identifier[random_string] ( literal[int] )
keyword[elif] identifier[names] == literal[string] :
identifier[tf_name] = identifier[w_name]
keyword[else] :
identifier[tf_name] = identifier[w_name] + identifier[str] ( identifier[random] . identifier[random] ())
identifier[bias_name] = literal[string] . identifier[format] ( identifier[w_name] )
identifier[weights_name] = literal[string] . identifier[format] ( identifier[w_name] )
keyword[if] identifier[len] ( identifier[weights] [ identifier[weights_name] ]. identifier[numpy] (). identifier[shape] )== literal[int] :
identifier[W] = identifier[weights] [ identifier[weights_name] ]. identifier[numpy] (). identifier[transpose] ( literal[int] , literal[int] , literal[int] , literal[int] )
identifier[height] , identifier[width] , identifier[n_filters] , identifier[channels] = identifier[W] . identifier[shape]
identifier[n_groups] = identifier[params] [ literal[string] ]
keyword[if] identifier[n_groups] > literal[int] :
keyword[raise] identifier[AssertionError] ( literal[string] )
keyword[if] identifier[params] [ literal[string] ][ literal[int] ]> literal[int] :
keyword[raise] identifier[AssertionError] ( literal[string] )
keyword[if] identifier[bias_name] keyword[in] identifier[weights] :
identifier[biases] = identifier[weights] [ identifier[bias_name] ]. identifier[numpy] ()
identifier[has_bias] = keyword[True]
keyword[else] :
identifier[biases] = keyword[None]
identifier[has_bias] = keyword[False]
identifier[input_name] = identifier[inputs] [ literal[int] ]
keyword[if] identifier[has_bias] :
identifier[weights] =[ identifier[W] , identifier[biases] ]
keyword[else] :
identifier[weights] =[ identifier[W] ]
identifier[conv] = identifier[keras] . identifier[layers] . identifier[Conv2DTranspose] (
identifier[filters] = identifier[n_filters] ,
identifier[kernel_size] =( identifier[height] , identifier[width] ),
identifier[strides] =( identifier[params] [ literal[string] ][ literal[int] ], identifier[params] [ literal[string] ][ literal[int] ]),
identifier[padding] = literal[string] ,
identifier[output_padding] = literal[int] ,
identifier[weights] = identifier[weights] ,
identifier[use_bias] = identifier[has_bias] ,
identifier[activation] = keyword[None] ,
identifier[dilation_rate] = identifier[params] [ literal[string] ][ literal[int] ],
identifier[bias_initializer] = literal[string] , identifier[kernel_initializer] = literal[string] ,
identifier[name] = identifier[tf_name]
)
identifier[layers] [ identifier[scope_name] ]= identifier[conv] ( identifier[layers] [ identifier[input_name] ])
identifier[layers] [ identifier[scope_name] ]. identifier[set_shape] ( identifier[layers] [ identifier[scope_name] ]. identifier[_keras_shape] )
identifier[pads] = identifier[params] [ literal[string] ]
keyword[if] identifier[pads] [ literal[int] ]> literal[int] :
keyword[assert] ( identifier[len] ( identifier[pads] )== literal[int] keyword[or] ( identifier[pads] [ literal[int] ]== identifier[pads] [ literal[int] ] keyword[and] identifier[pads] [ literal[int] ]== identifier[pads] [ literal[int] ]))
identifier[crop] = identifier[keras] . identifier[layers] . identifier[Cropping2D] (
identifier[pads] [: literal[int] ],
identifier[name] = identifier[tf_name] + literal[string]
)
identifier[layers] [ identifier[scope_name] ]= identifier[crop] ( identifier[layers] [ identifier[scope_name] ])
keyword[else] :
keyword[raise] identifier[AssertionError] ( literal[string] ) | def convert_convtranspose(params, w_name, scope_name, inputs, layers, weights, names):
"""
Convert transposed convolution layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting transposed convolution ...')
if names == 'short':
tf_name = 'C' + random_string(7) # depends on [control=['if'], data=[]]
elif names == 'keep':
tf_name = w_name # depends on [control=['if'], data=[]]
else:
tf_name = w_name + str(random.random())
bias_name = '{0}.bias'.format(w_name)
weights_name = '{0}.weight'.format(w_name)
if len(weights[weights_name].numpy().shape) == 4:
W = weights[weights_name].numpy().transpose(2, 3, 1, 0)
(height, width, n_filters, channels) = W.shape
n_groups = params['group']
if n_groups > 1:
raise AssertionError('Cannot convert conv1d with groups != 1') # depends on [control=['if'], data=[]]
if params['dilations'][0] > 1:
raise AssertionError('Cannot convert conv1d with dilation_rate != 1') # depends on [control=['if'], data=[]]
if bias_name in weights:
biases = weights[bias_name].numpy()
has_bias = True # depends on [control=['if'], data=['bias_name', 'weights']]
else:
biases = None
has_bias = False
input_name = inputs[0]
if has_bias:
weights = [W, biases] # depends on [control=['if'], data=[]]
else:
weights = [W]
conv = keras.layers.Conv2DTranspose(filters=n_filters, kernel_size=(height, width), strides=(params['strides'][0], params['strides'][1]), padding='valid', output_padding=0, weights=weights, use_bias=has_bias, activation=None, dilation_rate=params['dilations'][0], bias_initializer='zeros', kernel_initializer='zeros', name=tf_name)
layers[scope_name] = conv(layers[input_name])
# Magic ad-hoc.
# See the Keras issue: https://github.com/keras-team/keras/issues/6777
layers[scope_name].set_shape(layers[scope_name]._keras_shape)
pads = params['pads']
if pads[0] > 0:
assert len(pads) == 2 or (pads[2] == pads[0] and pads[3] == pads[1])
crop = keras.layers.Cropping2D(pads[:2], name=tf_name + '_crop')
layers[scope_name] = crop(layers[scope_name]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise AssertionError('Layer is not supported for now') |
def predict_size_distribution_models(self, model_names, input_columns, metadata_cols,
                                     data_mode="forecast", location=6, calibrate=False):
    """
    Make predictions using fitted size distribution models.

    Args:
        model_names: Names of the fitted models to generate predictions with.
        input_columns: Data columns used for input into the ML models.
        metadata_cols: Columns from the input data that should be included in the data frame with the predictions.
        data_mode: Key into self.data selecting which data set is used as model input (e.g. "forecast").
        location: Value of the fixed location parameter inserted when a model predicts only shape and scale.
        calibrate: Whether or not to apply the stored calibration models to the raw shape/scale predictions.
    Returns:
        Predictions in a dictionary of data frames keyed by group.
    """
    groups = self.size_distribution_models.keys()
    predictions = {}
    for group in groups:
        # Subset the combined input table to the rows belonging to this group.
        group_data = self.data[data_mode]["combo"].loc[self.data[data_mode]["combo"][self.group_col] == group]
        # Seed this group's prediction frame with the requested metadata columns
        # (stays metadata-only if the group has no rows).
        predictions[group] = group_data[metadata_cols]
        if group_data.shape[0] > 0:
            # Models were fit in standardized log space; these stats invert that transform.
            log_mean = self.size_distribution_models[group]["lognorm"]["mean"]
            log_sd = self.size_distribution_models[group]["lognorm"]["sd"]
            for m, model_name in enumerate(model_names):
                multi_predictions = self.size_distribution_models[group]["multi"][model_name].predict(
                    group_data[input_columns])
                if calibrate:
                    # Post-calibrate column 0 (shape) and column 1 (scale) separately.
                    multi_predictions[:, 0] = self.size_distribution_models[group]["calshape"][model_name].predict(
                        multi_predictions[:, 0:1])
                    multi_predictions[:, 1] = self.size_distribution_models[group]["calscale"][model_name].predict(
                        multi_predictions[:, 1:])
                # Undo the standardization and the log transform.
                multi_predictions = np.exp(multi_predictions * log_sd + log_mean)
                if multi_predictions.shape[1] == 2:
                    # Two-column output is (shape, scale); widen to three columns and
                    # insert the fixed location parameter between them.
                    multi_predictions_temp = np.zeros((multi_predictions.shape[0], 3))
                    multi_predictions_temp[:, 0] = multi_predictions[:, 0]
                    multi_predictions_temp[:, 1] = location
                    multi_predictions_temp[:, 2] = multi_predictions[:, 1]
                    multi_predictions = multi_predictions_temp
                for p, pred_col in enumerate(["shape", "location", "scale"]):
                    # NOTE(review): the chained predictions[group][model_name].loc[...]
                    # indexing looks suspect -- predictions[group] holds only metadata
                    # columns at this point, so [model_name] should KeyError; presumably
                    # predictions[group].loc[...] was intended. Confirm before changing.
                    predictions[group][model_name].loc[:, model_name.replace(" ", "-") + "_" + pred_col] = \
                        multi_predictions[:, p]
    return predictions
constant[
Make predictions using fitted size distribution models.
Args:
model_names: Name of the models for predictions
input_columns: Data columns used for input into ML models
metadata_cols: Columns from input data that should be included in the data frame with the predictions.
data_mode: Set of data used as input for prediction models
location: Value of fixed location parameter
calibrate: Whether or not to apply calibration model
Returns:
Predictions in dictionary of data frames grouped by group type
]
variable[groups] assign[=] call[name[self].size_distribution_models.keys, parameter[]]
variable[predictions] assign[=] dictionary[[], []]
for taget[name[group]] in starred[name[groups]] begin[:]
variable[group_data] assign[=] call[call[call[name[self].data][name[data_mode]]][constant[combo]].loc][compare[call[call[call[name[self].data][name[data_mode]]][constant[combo]]][name[self].group_col] equal[==] name[group]]]
call[name[predictions]][name[group]] assign[=] call[name[group_data]][name[metadata_cols]]
if compare[call[name[group_data].shape][constant[0]] greater[>] constant[0]] begin[:]
variable[log_mean] assign[=] call[call[call[name[self].size_distribution_models][name[group]]][constant[lognorm]]][constant[mean]]
variable[log_sd] assign[=] call[call[call[name[self].size_distribution_models][name[group]]][constant[lognorm]]][constant[sd]]
for taget[tuple[[<ast.Name object at 0x7da20c794250>, <ast.Name object at 0x7da20c7967d0>]]] in starred[call[name[enumerate], parameter[name[model_names]]]] begin[:]
variable[multi_predictions] assign[=] call[call[call[call[name[self].size_distribution_models][name[group]]][constant[multi]]][name[model_name]].predict, parameter[call[name[group_data]][name[input_columns]]]]
if name[calibrate] begin[:]
call[name[multi_predictions]][tuple[[<ast.Slice object at 0x7da1b0ea0430>, <ast.Constant object at 0x7da1b0ea2650>]]] assign[=] call[call[call[call[name[self].size_distribution_models][name[group]]][constant[calshape]]][name[model_name]].predict, parameter[call[name[multi_predictions]][tuple[[<ast.Slice object at 0x7da1b0ea3eb0>, <ast.Slice object at 0x7da1b0ea0730>]]]]]
call[name[multi_predictions]][tuple[[<ast.Slice object at 0x7da1b0ea2b90>, <ast.Constant object at 0x7da1b0ea2b60>]]] assign[=] call[call[call[call[name[self].size_distribution_models][name[group]]][constant[calscale]]][name[model_name]].predict, parameter[call[name[multi_predictions]][tuple[[<ast.Slice object at 0x7da1b0ea3640>, <ast.Slice object at 0x7da1b0ea37c0>]]]]]
variable[multi_predictions] assign[=] call[name[np].exp, parameter[binary_operation[binary_operation[name[multi_predictions] * name[log_sd]] + name[log_mean]]]]
if compare[call[name[multi_predictions].shape][constant[1]] equal[==] constant[2]] begin[:]
variable[multi_predictions_temp] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Subscript object at 0x7da1b0ea3910>, <ast.Constant object at 0x7da1b0ea3b50>]]]]
call[name[multi_predictions_temp]][tuple[[<ast.Slice object at 0x7da1b0ea3280>, <ast.Constant object at 0x7da1b0ea3040>]]] assign[=] call[name[multi_predictions]][tuple[[<ast.Slice object at 0x7da1b0ea3070>, <ast.Constant object at 0x7da1b0ea2dd0>]]]
call[name[multi_predictions_temp]][tuple[[<ast.Slice object at 0x7da1b0ea2b30>, <ast.Constant object at 0x7da1b0ea2c80>]]] assign[=] name[location]
call[name[multi_predictions_temp]][tuple[[<ast.Slice object at 0x7da1b0ea2e60>, <ast.Constant object at 0x7da1b0ea2c50>]]] assign[=] call[name[multi_predictions]][tuple[[<ast.Slice object at 0x7da1b0ea2e90>, <ast.Constant object at 0x7da204962680>]]]
variable[multi_predictions] assign[=] name[multi_predictions_temp]
for taget[tuple[[<ast.Name object at 0x7da204960520>, <ast.Name object at 0x7da204960dc0>]]] in starred[call[name[enumerate], parameter[list[[<ast.Constant object at 0x7da204963be0>, <ast.Constant object at 0x7da204960f10>, <ast.Constant object at 0x7da204961000>]]]]] begin[:]
call[call[call[name[predictions]][name[group]]][name[model_name]].loc][tuple[[<ast.Slice object at 0x7da204962f80>, <ast.BinOp object at 0x7da204962650>]]] assign[=] call[name[multi_predictions]][tuple[[<ast.Slice object at 0x7da204960580>, <ast.Name object at 0x7da204961150>]]]
return[name[predictions]] | keyword[def] identifier[predict_size_distribution_models] ( identifier[self] , identifier[model_names] , identifier[input_columns] , identifier[metadata_cols] ,
identifier[data_mode] = literal[string] , identifier[location] = literal[int] , identifier[calibrate] = keyword[False] ):
literal[string]
identifier[groups] = identifier[self] . identifier[size_distribution_models] . identifier[keys] ()
identifier[predictions] ={}
keyword[for] identifier[group] keyword[in] identifier[groups] :
identifier[group_data] = identifier[self] . identifier[data] [ identifier[data_mode] ][ literal[string] ]. identifier[loc] [ identifier[self] . identifier[data] [ identifier[data_mode] ][ literal[string] ][ identifier[self] . identifier[group_col] ]== identifier[group] ]
identifier[predictions] [ identifier[group] ]= identifier[group_data] [ identifier[metadata_cols] ]
keyword[if] identifier[group_data] . identifier[shape] [ literal[int] ]> literal[int] :
identifier[log_mean] = identifier[self] . identifier[size_distribution_models] [ identifier[group] ][ literal[string] ][ literal[string] ]
identifier[log_sd] = identifier[self] . identifier[size_distribution_models] [ identifier[group] ][ literal[string] ][ literal[string] ]
keyword[for] identifier[m] , identifier[model_name] keyword[in] identifier[enumerate] ( identifier[model_names] ):
identifier[multi_predictions] = identifier[self] . identifier[size_distribution_models] [ identifier[group] ][ literal[string] ][ identifier[model_name] ]. identifier[predict] (
identifier[group_data] [ identifier[input_columns] ])
keyword[if] identifier[calibrate] :
identifier[multi_predictions] [:, literal[int] ]= identifier[self] . identifier[size_distribution_models] [ identifier[group] ][ literal[string] ][ identifier[model_name] ]. identifier[predict] (
identifier[multi_predictions] [:, literal[int] : literal[int] ])
identifier[multi_predictions] [:, literal[int] ]= identifier[self] . identifier[size_distribution_models] [ identifier[group] ][ literal[string] ][ identifier[model_name] ]. identifier[predict] (
identifier[multi_predictions] [:, literal[int] :])
identifier[multi_predictions] = identifier[np] . identifier[exp] ( identifier[multi_predictions] * identifier[log_sd] + identifier[log_mean] )
keyword[if] identifier[multi_predictions] . identifier[shape] [ literal[int] ]== literal[int] :
identifier[multi_predictions_temp] = identifier[np] . identifier[zeros] (( identifier[multi_predictions] . identifier[shape] [ literal[int] ], literal[int] ))
identifier[multi_predictions_temp] [:, literal[int] ]= identifier[multi_predictions] [:, literal[int] ]
identifier[multi_predictions_temp] [:, literal[int] ]= identifier[location]
identifier[multi_predictions_temp] [:, literal[int] ]= identifier[multi_predictions] [:, literal[int] ]
identifier[multi_predictions] = identifier[multi_predictions_temp]
keyword[for] identifier[p] , identifier[pred_col] keyword[in] identifier[enumerate] ([ literal[string] , literal[string] , literal[string] ]):
identifier[predictions] [ identifier[group] ][ identifier[model_name] ]. identifier[loc] [:, identifier[model_name] . identifier[replace] ( literal[string] , literal[string] )+ literal[string] + identifier[pred_col] ]= identifier[multi_predictions] [:, identifier[p] ]
keyword[return] identifier[predictions] | def predict_size_distribution_models(self, model_names, input_columns, metadata_cols, data_mode='forecast', location=6, calibrate=False):
"""
Make predictions using fitted size distribution models.
Args:
model_names: Name of the models for predictions
input_columns: Data columns used for input into ML models
metadata_cols: Columns from input data that should be included in the data frame with the predictions.
data_mode: Set of data used as input for prediction models
location: Value of fixed location parameter
calibrate: Whether or not to apply calibration model
Returns:
Predictions in dictionary of data frames grouped by group type
"""
groups = self.size_distribution_models.keys()
predictions = {}
for group in groups:
group_data = self.data[data_mode]['combo'].loc[self.data[data_mode]['combo'][self.group_col] == group]
predictions[group] = group_data[metadata_cols]
if group_data.shape[0] > 0:
log_mean = self.size_distribution_models[group]['lognorm']['mean']
log_sd = self.size_distribution_models[group]['lognorm']['sd']
for (m, model_name) in enumerate(model_names):
multi_predictions = self.size_distribution_models[group]['multi'][model_name].predict(group_data[input_columns])
if calibrate:
multi_predictions[:, 0] = self.size_distribution_models[group]['calshape'][model_name].predict(multi_predictions[:, 0:1])
multi_predictions[:, 1] = self.size_distribution_models[group]['calscale'][model_name].predict(multi_predictions[:, 1:]) # depends on [control=['if'], data=[]]
multi_predictions = np.exp(multi_predictions * log_sd + log_mean)
if multi_predictions.shape[1] == 2:
multi_predictions_temp = np.zeros((multi_predictions.shape[0], 3))
multi_predictions_temp[:, 0] = multi_predictions[:, 0]
multi_predictions_temp[:, 1] = location
multi_predictions_temp[:, 2] = multi_predictions[:, 1]
multi_predictions = multi_predictions_temp # depends on [control=['if'], data=[]]
for (p, pred_col) in enumerate(['shape', 'location', 'scale']):
predictions[group][model_name].loc[:, model_name.replace(' ', '-') + '_' + pred_col] = multi_predictions[:, p] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['group']]
return predictions |
def create_all_psd():
    """Overlay the PSD estimates of every spectrum method on one figure.

    Runs the MA, Yule-Walker, Burg, covariance, modified-covariance,
    correlogram, minimum-variance, MUSIC, eigen-vector and periodogram
    estimators on the global signal ``xx`` and plots them all together,
    then labels the curves and clamps the y-axis to [-80, 80] dB.
    """
    # ``f`` is only used by the commented-out ARMA example below; kept so
    # that example can be re-enabled as-is.
    f = pylab.linspace(0, 1, 4096)
    pylab.figure(figsize=(12, 8))
    # MA model
    p = spectrum.pma(xx, 64, 128)
    p()
    p.plot()
    """
    #ARMA 15 order
    a, b, rho = spectrum.arma_estimate(data, 15,15, 30)
    psd = spectrum.arma2psd(A=a,B=b, rho=rho)
    newpsd = tools.cshift(psd, len(psd)//2) # switch positive and negative freq
    pylab.plot(f, 10 * pylab.log10(newpsd/max(newpsd)), label='ARMA 15,15')
    """
    # Yule-Walker; equivalent to:
    # plot([x for x in p.frequencies()], 10*log10(p.psd)); grid(True)
    p = spectrum.pyule(xx, 7, NFFT=4096, scale_by_freq=False)
    p.plot()
    # Burg method
    p = spectrum.pburg(xx, 7, scale_by_freq=False)
    p.plot()
    # Covariance
    p = spectrum.pcovar(xx, 7, scale_by_freq=False)
    p.plot()
    # Modified covariance
    p = spectrum.pmodcovar(xx, 7, scale_by_freq=False)
    p.plot()
    # Correlogram
    p = spectrum.pcorrelogram(xx, lag=60, NFFT=512, scale_by_freq=False)
    p.plot()
    # Minimum variance
    p = spectrum.pminvar(xx, 7, NFFT=256, scale_by_freq=False)
    p.plot()
    # MUSIC
    p = spectrum.pmusic(xx, 10, 4, scale_by_freq=False)
    p.plot()
    # Eigen-vector method
    p = spectrum.pev(xx, 10, 4, scale_by_freq=False)
    p.plot()
    # Periodogram
    p = spectrum.Periodogram(xx, scale_by_freq=False)
    p.plot()
    # Fix: qualify ``legend`` with the pylab namespace like every other
    # plotting call in this function; the bare name otherwise relies on a
    # ``from pylab import *`` having happened elsewhere.
    # NOTE(review): the "MA 32" label does not match the pma(xx, 64, 128)
    # call above -- confirm which order is intended.
    pylab.legend(["MA 32", "pyule 7", "pburg 7", "pcovar", "pmodcovar",
                  "correlogram", "minvar", "pmusic", "pev", "periodgram"])
    pylab.ylim([-80, 80])
variable[f] assign[=] call[name[pylab].linspace, parameter[constant[0], constant[1], constant[4096]]]
call[name[pylab].figure, parameter[]]
variable[p] assign[=] call[name[spectrum].pma, parameter[name[xx], constant[64], constant[128]]]
call[name[p], parameter[]]
call[name[p].plot, parameter[]]
constant[
#ARMA 15 order
a, b, rho = spectrum.arma_estimate(data, 15,15, 30)
psd = spectrum.arma2psd(A=a,B=b, rho=rho)
newpsd = tools.cshift(psd, len(psd)//2) # switch positive and negative freq
pylab.plot(f, 10 * pylab.log10(newpsd/max(newpsd)), label='ARMA 15,15')
]
variable[p] assign[=] call[name[spectrum].pyule, parameter[name[xx], constant[7]]]
call[name[p].plot, parameter[]]
variable[p] assign[=] call[name[spectrum].pburg, parameter[name[xx], constant[7]]]
call[name[p].plot, parameter[]]
variable[p] assign[=] call[name[spectrum].pcovar, parameter[name[xx], constant[7]]]
call[name[p].plot, parameter[]]
variable[p] assign[=] call[name[spectrum].pmodcovar, parameter[name[xx], constant[7]]]
call[name[p].plot, parameter[]]
variable[p] assign[=] call[name[spectrum].pcorrelogram, parameter[name[xx]]]
call[name[p].plot, parameter[]]
variable[p] assign[=] call[name[spectrum].pminvar, parameter[name[xx], constant[7]]]
call[name[p].plot, parameter[]]
variable[p] assign[=] call[name[spectrum].pmusic, parameter[name[xx], constant[10], constant[4]]]
call[name[p].plot, parameter[]]
variable[p] assign[=] call[name[spectrum].pev, parameter[name[xx], constant[10], constant[4]]]
call[name[p].plot, parameter[]]
variable[p] assign[=] call[name[spectrum].Periodogram, parameter[name[xx]]]
call[name[p].plot, parameter[]]
call[name[legend], parameter[list[[<ast.Constant object at 0x7da18c4cf6d0>, <ast.Constant object at 0x7da18c4ccb50>, <ast.Constant object at 0x7da18c4cea40>, <ast.Constant object at 0x7da18c4ce650>, <ast.Constant object at 0x7da18c4ce5f0>, <ast.Constant object at 0x7da18c4cc7c0>, <ast.Constant object at 0x7da18c4cf310>, <ast.Constant object at 0x7da1b01c3d00>, <ast.Constant object at 0x7da1b01c3760>, <ast.Constant object at 0x7da1b01c3a30>]]]]
call[name[pylab].ylim, parameter[list[[<ast.UnaryOp object at 0x7da1b01c0b80>, <ast.Constant object at 0x7da1b01c3190>]]]] | keyword[def] identifier[create_all_psd] ():
identifier[f] = identifier[pylab] . identifier[linspace] ( literal[int] , literal[int] , literal[int] )
identifier[pylab] . identifier[figure] ( identifier[figsize] =( literal[int] , literal[int] ))
identifier[p] = identifier[spectrum] . identifier[pma] ( identifier[xx] , literal[int] , literal[int] ); identifier[p] (); identifier[p] . identifier[plot] ()
literal[string]
identifier[p] = identifier[spectrum] . identifier[pyule] ( identifier[xx] , literal[int] , identifier[NFFT] = literal[int] , identifier[scale_by_freq] = keyword[False] ); identifier[p] . identifier[plot] ()
identifier[p] = identifier[spectrum] . identifier[pburg] ( identifier[xx] , literal[int] , identifier[scale_by_freq] = keyword[False] ); identifier[p] . identifier[plot] ()
identifier[p] = identifier[spectrum] . identifier[pcovar] ( identifier[xx] , literal[int] , identifier[scale_by_freq] = keyword[False] ); identifier[p] . identifier[plot] ()
identifier[p] = identifier[spectrum] . identifier[pmodcovar] ( identifier[xx] , literal[int] , identifier[scale_by_freq] = keyword[False] ); identifier[p] . identifier[plot] ()
identifier[p] = identifier[spectrum] . identifier[pcorrelogram] ( identifier[xx] , identifier[lag] = literal[int] , identifier[NFFT] = literal[int] , identifier[scale_by_freq] = keyword[False] ); identifier[p] . identifier[plot] ()
identifier[p] = identifier[spectrum] . identifier[pminvar] ( identifier[xx] , literal[int] , identifier[NFFT] = literal[int] , identifier[scale_by_freq] = keyword[False] ); identifier[p] . identifier[plot] ()
identifier[p] = identifier[spectrum] . identifier[pmusic] ( identifier[xx] , literal[int] , literal[int] , identifier[scale_by_freq] = keyword[False] ); identifier[p] . identifier[plot] ()
identifier[p] = identifier[spectrum] . identifier[pev] ( identifier[xx] , literal[int] , literal[int] , identifier[scale_by_freq] = keyword[False] ); identifier[p] . identifier[plot] ()
identifier[p] = identifier[spectrum] . identifier[Periodogram] ( identifier[xx] , identifier[scale_by_freq] = keyword[False] ); identifier[p] . identifier[plot] ()
identifier[legend] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ])
identifier[pylab] . identifier[ylim] ([- literal[int] , literal[int] ]) | def create_all_psd():
f = pylab.linspace(0, 1, 4096)
pylab.figure(figsize=(12, 8))
# MA model
p = spectrum.pma(xx, 64, 128)
p()
p.plot()
"\n #ARMA 15 order\n a, b, rho = spectrum.arma_estimate(data, 15,15, 30)\n psd = spectrum.arma2psd(A=a,B=b, rho=rho)\n newpsd = tools.cshift(psd, len(psd)//2) # switch positive and negative freq\n pylab.plot(f, 10 * pylab.log10(newpsd/max(newpsd)), label='ARMA 15,15')\n "
# YULE WALKER
p = spectrum.pyule(xx, 7, NFFT=4096, scale_by_freq=False)
p.plot()
# equivalent to
# plot([x for x in p.frequencies()] , 10*log10(p.psd)); grid(True)
#burg method
p = spectrum.pburg(xx, 7, scale_by_freq=False)
p.plot()
#pcovar
p = spectrum.pcovar(xx, 7, scale_by_freq=False)
p.plot()
#pmodcovar
p = spectrum.pmodcovar(xx, 7, scale_by_freq=False)
p.plot()
# correlogram
p = spectrum.pcorrelogram(xx, lag=60, NFFT=512, scale_by_freq=False)
p.plot()
# minvar
p = spectrum.pminvar(xx, 7, NFFT=256, scale_by_freq=False)
p.plot()
# pmusic
p = spectrum.pmusic(xx, 10, 4, scale_by_freq=False)
p.plot()
# pmusic
p = spectrum.pev(xx, 10, 4, scale_by_freq=False)
p.plot()
# periodogram
p = spectrum.Periodogram(xx, scale_by_freq=False)
p.plot()
#
legend(['MA 32', 'pyule 7', 'pburg 7', 'pcovar', 'pmodcovar', 'correlogram', 'minvar', 'pmusic', 'pev', 'periodgram'])
pylab.ylim([-80, 80]) |
def bubble_to_dot(bblfile:str, dotfile:str=None, render:bool=False,
                  oriented:bool=False):
    """Translate a bubble file into an equivalent dot graph.

    The bubble file is first parsed into a BubbleTree (oriented or not),
    which is then written out as a dot file and optionally rendered.
    """
    bubble_tree = BubbleTree.from_bubble_file(bblfile, oriented=bool(oriented))
    return tree_to_dot(bubble_tree, dotfile, render=render)
constant[Write in dotfile a graph equivalent to those depicted in bubble file]
variable[tree] assign[=] call[name[BubbleTree].from_bubble_file, parameter[name[bblfile]]]
return[call[name[tree_to_dot], parameter[name[tree], name[dotfile]]]] | keyword[def] identifier[bubble_to_dot] ( identifier[bblfile] : identifier[str] , identifier[dotfile] : identifier[str] = keyword[None] , identifier[render] : identifier[bool] = keyword[False] ,
identifier[oriented] : identifier[bool] = keyword[False] ):
literal[string]
identifier[tree] = identifier[BubbleTree] . identifier[from_bubble_file] ( identifier[bblfile] , identifier[oriented] = identifier[bool] ( identifier[oriented] ))
keyword[return] identifier[tree_to_dot] ( identifier[tree] , identifier[dotfile] , identifier[render] = identifier[render] ) | def bubble_to_dot(bblfile: str, dotfile: str=None, render: bool=False, oriented: bool=False):
"""Write in dotfile a graph equivalent to those depicted in bubble file"""
tree = BubbleTree.from_bubble_file(bblfile, oriented=bool(oriented))
return tree_to_dot(tree, dotfile, render=render) |
def set_deferred_transfer(self, enable):
    """
    Enable or disable buffering (deferral) of transfers.

    Deferred transfers are off by default, so every read and write has
    completed by the time the call returns. With deferral enabled,
    packets are buffered and sent in batches for speed; a memory write
    may then happen immediately or piggy-back on a later write, which
    means a bad write can surface as an exception on a later, unrelated
    one. Call flush() to guarantee earlier writes have finished.
    Read behaviour is controlled by the READ_START / READ_NOW / READ_END
    modes: READ_NOW (the default) flushes pending writes and reads
    immediately, while READ_START plus a later READ_END lets multiple
    reads be buffered and sent at once. All READ_ENDs must be issued
    before another READ_NOW call can be made.
    """
    # Switching deferral off must not strand queued transfers: push out
    # anything still buffered before changing modes.
    currently_deferred = self._deferred_transfer
    if currently_deferred and not enable:
        self.flush()
    self._deferred_transfer = enable
constant[
Allow transfers to be delayed and buffered
By default deferred transfers are turned off. All reads and
writes will be completed by the time the function returns.
When enabled packets are buffered and sent all at once, which
increases speed. When memory is written to, the transfer
might take place immediately, or might take place on a future
memory write. This means that an invalid write could cause an
exception to occur on a later, unrelated write. To guarantee
that previous writes are complete call the flush() function.
The behaviour of read operations is determined by the modes
READ_START, READ_NOW and READ_END. The option READ_NOW is the
default and will cause the read to flush all previous writes,
and read the data immediately. To improve performance, multiple
reads can be made using READ_START and finished later with READ_NOW.
This allows the reads to be buffered and sent at once. Note - All
READ_ENDs must be called before a call using READ_NOW can be made.
]
if <ast.BoolOp object at 0x7da1b18bd330> begin[:]
call[name[self].flush, parameter[]]
name[self]._deferred_transfer assign[=] name[enable] | keyword[def] identifier[set_deferred_transfer] ( identifier[self] , identifier[enable] ):
literal[string]
keyword[if] identifier[self] . identifier[_deferred_transfer] keyword[and] keyword[not] identifier[enable] :
identifier[self] . identifier[flush] ()
identifier[self] . identifier[_deferred_transfer] = identifier[enable] | def set_deferred_transfer(self, enable):
"""
Allow transfers to be delayed and buffered
By default deferred transfers are turned off. All reads and
writes will be completed by the time the function returns.
When enabled packets are buffered and sent all at once, which
increases speed. When memory is written to, the transfer
might take place immediately, or might take place on a future
memory write. This means that an invalid write could cause an
exception to occur on a later, unrelated write. To guarantee
that previous writes are complete call the flush() function.
The behaviour of read operations is determined by the modes
READ_START, READ_NOW and READ_END. The option READ_NOW is the
default and will cause the read to flush all previous writes,
and read the data immediately. To improve performance, multiple
reads can be made using READ_START and finished later with READ_NOW.
This allows the reads to be buffered and sent at once. Note - All
READ_ENDs must be called before a call using READ_NOW can be made.
"""
if self._deferred_transfer and (not enable):
self.flush() # depends on [control=['if'], data=[]]
self._deferred_transfer = enable |
def ethereum_address(self) -> str:
    """Generate a random Ethereum-style address.

    .. Note: The result merely looks like an Ethereum address
        (0x followed by 40 hex digits); it is not guaranteed to be
        a valid, checksummed address.

    :return: Ethereum address.

    :Example:
        0xe8ece9e6ff7dba52d4c07d37418036a89af9698d
    """
    # 160 random bits == 20 bytes, the width of an Ethereum address.
    raw_bytes = self.random.getrandbits(160).to_bytes(20, byteorder='big')
    return '0x' + raw_bytes.hex()
constant[Generate a random Ethereum address.
.. Note: The address will look like Ethereum address,
but keep in mind that it is not the valid address.
:return: Ethereum address.
:Example:
0xe8ece9e6ff7dba52d4c07d37418036a89af9698d
]
variable[bits] assign[=] call[name[self].random.getrandbits, parameter[constant[160]]]
variable[address] assign[=] call[name[bits].to_bytes, parameter[constant[20]]]
return[binary_operation[constant[0x] + call[name[address].hex, parameter[]]]] | keyword[def] identifier[ethereum_address] ( identifier[self] )-> identifier[str] :
literal[string]
identifier[bits] = identifier[self] . identifier[random] . identifier[getrandbits] ( literal[int] )
identifier[address] = identifier[bits] . identifier[to_bytes] ( literal[int] , identifier[byteorder] = literal[string] )
keyword[return] literal[string] + identifier[address] . identifier[hex] () | def ethereum_address(self) -> str:
"""Generate a random Ethereum address.
.. Note: The address will look like Ethereum address,
but keep in mind that it is not the valid address.
:return: Ethereum address.
:Example:
0xe8ece9e6ff7dba52d4c07d37418036a89af9698d
"""
bits = self.random.getrandbits(160)
address = bits.to_bytes(20, byteorder='big')
return '0x' + address.hex() |
def pb(name, data, bucket_count=None, display_name=None, description=None):
  """Create a legacy histogram summary protobuf.

  Arguments:
    name: A unique name for the generated summary, including any desired
      name scopes.
    data: A `np.array` or array-like form of any shape. Must have type
      castable to `float`.
    bucket_count: Optional positive `int`. The output will have this
      many buckets, except in two edge cases. If there is no data, then
      there are no buckets. If there is data but all points have the
      same value, then there is one bucket whose left and right
      endpoints are the same.
    display_name: Optional name for this summary in TensorBoard, as a
      `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
      `str`. Markdown is supported. Defaults to empty.

  Returns:
    A `tf.Summary` protobuf object whose single value holds a
    `(bucket_count, 3)` tensor of `[left_edge, right_edge, count]` rows.
  """
  # TODO(nickfelt): remove on-demand imports once dep situation is fixed.
  import tensorflow.compat.v1 as tf

  if bucket_count is None:
    bucket_count = summary_v2.DEFAULT_BUCKET_COUNT
  data = np.array(data).flatten().astype(float)
  if data.size == 0:
    # No data: an empty (0, 3) bucket array.
    buckets = np.array([]).reshape((0, 3))
  else:
    min_ = np.min(data)
    max_ = np.max(data)
    range_ = max_ - min_
    if range_ == 0:
      # All points share one value: a single unit-wide bucket centered on it.
      center = min_
      buckets = np.array([[center - 0.5, center + 0.5, float(data.size)]])
    else:
      bucket_width = range_ / bucket_count
      offsets = data - min_
      bucket_indices = np.floor(offsets / bucket_width).astype(int)
      # The maximum lands exactly on the right edge (index == bucket_count);
      # clamp it into the last bucket.
      clamped_indices = np.minimum(bucket_indices, bucket_count - 1)
      # Count points per bucket in O(n) with bincount; this is equivalent to
      # (but much cheaper than) summing an (n, bucket_count) one-hot matrix.
      bucket_counts = np.bincount(clamped_indices, minlength=bucket_count)
      edges = np.linspace(min_, max_, bucket_count + 1)
      left_edges = edges[:-1]
      right_edges = edges[1:]
      buckets = np.array([left_edges, right_edges, bucket_counts]).transpose()
  tensor = tf.make_tensor_proto(buckets, dtype=tf.float64)

  if display_name is None:
    display_name = name
  summary_metadata = metadata.create_summary_metadata(
      display_name=display_name, description=description)
  tf_summary_metadata = tf.SummaryMetadata.FromString(
      summary_metadata.SerializeToString())

  summary = tf.Summary()
  summary.value.add(tag='%s/histogram_summary' % name,
                    metadata=tf_summary_metadata,
                    tensor=tensor)
  return summary
constant[Create a legacy histogram summary protobuf.
Arguments:
name: A unique name for the generated summary, including any desired
name scopes.
data: A `np.array` or array-like form of any shape. Must have type
castable to `float`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf object.
]
import module[tensorflow.compat.v1] as alias[tf]
if compare[name[bucket_count] is constant[None]] begin[:]
variable[bucket_count] assign[=] name[summary_v2].DEFAULT_BUCKET_COUNT
variable[data] assign[=] call[call[call[name[np].array, parameter[name[data]]].flatten, parameter[]].astype, parameter[name[float]]]
if compare[name[data].size equal[==] constant[0]] begin[:]
variable[buckets] assign[=] call[call[name[np].array, parameter[list[[]]]].reshape, parameter[tuple[[<ast.Constant object at 0x7da20e9b0160>, <ast.Constant object at 0x7da20e9b3700>]]]]
variable[tensor] assign[=] call[name[tf].make_tensor_proto, parameter[name[buckets]]]
if compare[name[display_name] is constant[None]] begin[:]
variable[display_name] assign[=] name[name]
variable[summary_metadata] assign[=] call[name[metadata].create_summary_metadata, parameter[]]
variable[tf_summary_metadata] assign[=] call[name[tf].SummaryMetadata.FromString, parameter[call[name[summary_metadata].SerializeToString, parameter[]]]]
variable[summary] assign[=] call[name[tf].Summary, parameter[]]
call[name[summary].value.add, parameter[]]
return[name[summary]] | keyword[def] identifier[pb] ( identifier[name] , identifier[data] , identifier[bucket_count] = keyword[None] , identifier[display_name] = keyword[None] , identifier[description] = keyword[None] ):
literal[string]
keyword[import] identifier[tensorflow] . identifier[compat] . identifier[v1] keyword[as] identifier[tf]
keyword[if] identifier[bucket_count] keyword[is] keyword[None] :
identifier[bucket_count] = identifier[summary_v2] . identifier[DEFAULT_BUCKET_COUNT]
identifier[data] = identifier[np] . identifier[array] ( identifier[data] ). identifier[flatten] (). identifier[astype] ( identifier[float] )
keyword[if] identifier[data] . identifier[size] == literal[int] :
identifier[buckets] = identifier[np] . identifier[array] ([]). identifier[reshape] (( literal[int] , literal[int] ))
keyword[else] :
identifier[min_] = identifier[np] . identifier[min] ( identifier[data] )
identifier[max_] = identifier[np] . identifier[max] ( identifier[data] )
identifier[range_] = identifier[max_] - identifier[min_]
keyword[if] identifier[range_] == literal[int] :
identifier[center] = identifier[min_]
identifier[buckets] = identifier[np] . identifier[array] ([[ identifier[center] - literal[int] , identifier[center] + literal[int] , identifier[float] ( identifier[data] . identifier[size] )]])
keyword[else] :
identifier[bucket_width] = identifier[range_] / identifier[bucket_count]
identifier[offsets] = identifier[data] - identifier[min_]
identifier[bucket_indices] = identifier[np] . identifier[floor] ( identifier[offsets] / identifier[bucket_width] ). identifier[astype] ( identifier[int] )
identifier[clamped_indices] = identifier[np] . identifier[minimum] ( identifier[bucket_indices] , identifier[bucket_count] - literal[int] )
identifier[one_hots] =( identifier[np] . identifier[array] ([ identifier[clamped_indices] ]). identifier[transpose] ()
== identifier[np] . identifier[arange] ( literal[int] , identifier[bucket_count] ))
keyword[assert] identifier[one_hots] . identifier[shape] ==( identifier[data] . identifier[size] , identifier[bucket_count] ),(
identifier[one_hots] . identifier[shape] ,( identifier[data] . identifier[size] , identifier[bucket_count] ))
identifier[bucket_counts] = identifier[np] . identifier[sum] ( identifier[one_hots] , identifier[axis] = literal[int] )
identifier[edges] = identifier[np] . identifier[linspace] ( identifier[min_] , identifier[max_] , identifier[bucket_count] + literal[int] )
identifier[left_edges] = identifier[edges] [:- literal[int] ]
identifier[right_edges] = identifier[edges] [ literal[int] :]
identifier[buckets] = identifier[np] . identifier[array] ([ identifier[left_edges] , identifier[right_edges] , identifier[bucket_counts] ]). identifier[transpose] ()
identifier[tensor] = identifier[tf] . identifier[make_tensor_proto] ( identifier[buckets] , identifier[dtype] = identifier[tf] . identifier[float64] )
keyword[if] identifier[display_name] keyword[is] keyword[None] :
identifier[display_name] = identifier[name]
identifier[summary_metadata] = identifier[metadata] . identifier[create_summary_metadata] (
identifier[display_name] = identifier[display_name] , identifier[description] = identifier[description] )
identifier[tf_summary_metadata] = identifier[tf] . identifier[SummaryMetadata] . identifier[FromString] (
identifier[summary_metadata] . identifier[SerializeToString] ())
identifier[summary] = identifier[tf] . identifier[Summary] ()
identifier[summary] . identifier[value] . identifier[add] ( identifier[tag] = literal[string] % identifier[name] ,
identifier[metadata] = identifier[tf_summary_metadata] ,
identifier[tensor] = identifier[tensor] )
keyword[return] identifier[summary] | def pb(name, data, bucket_count=None, display_name=None, description=None):
"""Create a legacy histogram summary protobuf.
Arguments:
name: A unique name for the generated summary, including any desired
name scopes.
data: A `np.array` or array-like form of any shape. Must have type
castable to `float`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf object.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if bucket_count is None:
bucket_count = summary_v2.DEFAULT_BUCKET_COUNT # depends on [control=['if'], data=['bucket_count']]
data = np.array(data).flatten().astype(float)
if data.size == 0:
buckets = np.array([]).reshape((0, 3)) # depends on [control=['if'], data=[]]
else:
min_ = np.min(data)
max_ = np.max(data)
range_ = max_ - min_
if range_ == 0:
center = min_
buckets = np.array([[center - 0.5, center + 0.5, float(data.size)]]) # depends on [control=['if'], data=[]]
else:
bucket_width = range_ / bucket_count
offsets = data - min_
bucket_indices = np.floor(offsets / bucket_width).astype(int)
clamped_indices = np.minimum(bucket_indices, bucket_count - 1)
one_hots = np.array([clamped_indices]).transpose() == np.arange(0, bucket_count) # broadcast
assert one_hots.shape == (data.size, bucket_count), (one_hots.shape, (data.size, bucket_count))
bucket_counts = np.sum(one_hots, axis=0)
edges = np.linspace(min_, max_, bucket_count + 1)
left_edges = edges[:-1]
right_edges = edges[1:]
buckets = np.array([left_edges, right_edges, bucket_counts]).transpose()
tensor = tf.make_tensor_proto(buckets, dtype=tf.float64)
if display_name is None:
display_name = name # depends on [control=['if'], data=['display_name']]
summary_metadata = metadata.create_summary_metadata(display_name=display_name, description=description)
tf_summary_metadata = tf.SummaryMetadata.FromString(summary_metadata.SerializeToString())
summary = tf.Summary()
summary.value.add(tag='%s/histogram_summary' % name, metadata=tf_summary_metadata, tensor=tensor)
return summary |
def _unpad(self, a, axis, out):
"""Undo padding in an array.
Parameters
----------
a : (..., N, ...) ndarray
array to be trimmed to size `Nin`
axis : int
axis along which to unpad
out : bool
trim the output if True, otherwise the input; the two cases have
their left and right pad sizes reversed
"""
assert a.shape[axis] == self.N
Npad = self.N - self.Nin
if out:
_Npad, Npad_ = Npad - Npad//2, Npad//2
else:
_Npad, Npad_ = Npad//2, Npad - Npad//2
return np.take(a, range(_Npad, self.N - Npad_), axis=axis) | def function[_unpad, parameter[self, a, axis, out]]:
constant[Undo padding in an array.
Parameters
----------
a : (..., N, ...) ndarray
array to be trimmed to size `Nin`
axis : int
axis along which to unpad
out : bool
trim the output if True, otherwise the input; the two cases have
their left and right pad sizes reversed
]
assert[compare[call[name[a].shape][name[axis]] equal[==] name[self].N]]
variable[Npad] assign[=] binary_operation[name[self].N - name[self].Nin]
if name[out] begin[:]
<ast.Tuple object at 0x7da1b19698d0> assign[=] tuple[[<ast.BinOp object at 0x7da1b196bc40>, <ast.BinOp object at 0x7da1b196aef0>]]
return[call[name[np].take, parameter[name[a], call[name[range], parameter[name[_Npad], binary_operation[name[self].N - name[Npad_]]]]]]] | keyword[def] identifier[_unpad] ( identifier[self] , identifier[a] , identifier[axis] , identifier[out] ):
literal[string]
keyword[assert] identifier[a] . identifier[shape] [ identifier[axis] ]== identifier[self] . identifier[N]
identifier[Npad] = identifier[self] . identifier[N] - identifier[self] . identifier[Nin]
keyword[if] identifier[out] :
identifier[_Npad] , identifier[Npad_] = identifier[Npad] - identifier[Npad] // literal[int] , identifier[Npad] // literal[int]
keyword[else] :
identifier[_Npad] , identifier[Npad_] = identifier[Npad] // literal[int] , identifier[Npad] - identifier[Npad] // literal[int]
keyword[return] identifier[np] . identifier[take] ( identifier[a] , identifier[range] ( identifier[_Npad] , identifier[self] . identifier[N] - identifier[Npad_] ), identifier[axis] = identifier[axis] ) | def _unpad(self, a, axis, out):
"""Undo padding in an array.
Parameters
----------
a : (..., N, ...) ndarray
array to be trimmed to size `Nin`
axis : int
axis along which to unpad
out : bool
trim the output if True, otherwise the input; the two cases have
their left and right pad sizes reversed
"""
assert a.shape[axis] == self.N
Npad = self.N - self.Nin
if out:
(_Npad, Npad_) = (Npad - Npad // 2, Npad // 2) # depends on [control=['if'], data=[]]
else:
(_Npad, Npad_) = (Npad // 2, Npad - Npad // 2)
return np.take(a, range(_Npad, self.N - Npad_), axis=axis) |
def writeUndo(self, varBind, **context):
    """Roll back a failed Managed Object Instance modification.

    Implements the third (unsuccessful) step of the multi-step SNMP SET
    workflow (:RFC:`1905#section-4.2.5`). This phase returns the Managed
    Object Instance being modified to its previous state; it is entered
    whenever any of the simultaneously modified instances failed during
    the *commit* phase.

    This node is non-terminal in the MIB tree: it does not touch the
    actual Managed Object Instance itself, but descends one level and
    delegates the query to the underlying objects.

    Parameters
    ----------
    varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing
        new Managed Object Instance value to set

    Other Parameters
    ----------------
    \\*\\*context:
        Query parameters:

        * `cbFun` (callable) - user-supplied callable that is invoked to
          pass the new value of the Managed Object Instance or an error.

    Notes
    -----
    The callback functions (e.g. `cbFun`) have the same signature as this
    method where `varBind` contains the new Managed Object Instance value.

    In case of an error, the `error` key in the `context` dict will contain
    an exception object.
    """
    name, val = varBind

    if debug.logger & debug.FLAG_INS:
        debug.logger('%s: writeUndo(%s, %r)' % (self, name, val))

    cbFun = context['cbFun']
    idx = context['idx']

    # Per-object record of instances created/destroyed earlier in this SET
    instances = context['instances'].setdefault(
        self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}})

    # A create or destroy performed in an earlier phase is undone in kind
    if idx in instances[self.ST_CREATE]:
        self.createUndo(varBind, **context)
        return

    if idx in instances[self.ST_DESTROY]:
        self.destroyUndo(varBind, **context)
        return

    try:
        branch = self.getBranch(name, **context)

    except (error.NoSuchInstanceError, error.NoSuchObjectError) as exc:
        cbFun(varBind, **dict(context, error=exc))

    else:
        branch.writeUndo(varBind, **context)
constant[Finalize Managed Object Instance modification.
Implements the third (unsuccessful) step of the multi-step workflow
of the SNMP SET command processing (:RFC:`1905#section-4.2.5`).
The goal of the third phase is to roll the Managed Object Instance
being modified back into its previous state. The system transitions
into the *undo* state whenever any of the simultaneously modified
Managed Objects Instances fail on the *commit* state transitioning.
The role of this object in the MIB tree is non-terminal. It does not
access the actual Managed Object Instance, but just traverses one level
down the MIB tree and hands off the query to the underlying objects.
Parameters
----------
varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing
new Managed Object Instance value to set
Other Parameters
----------------
\*\*context:
Query parameters:
* `cbFun` (callable) - user-supplied callable that is invoked to
pass the new value of the Managed Object Instance or an error.
Notes
-----
The callback functions (e.g. `cbFun`) have the same signature as this
method where `varBind` contains the new Managed Object Instance value.
In case of an error, the `error` key in the `context` dict will contain
an exception object.
]
<ast.Tuple object at 0x7da20c76ece0> assign[=] name[varBind]
<ast.BoolOp object at 0x7da20c76f310>
variable[cbFun] assign[=] call[name[context]][constant[cbFun]]
variable[instances] assign[=] call[call[name[context]][constant[instances]].setdefault, parameter[name[self].name, dictionary[[<ast.Attribute object at 0x7da20c76c820>, <ast.Attribute object at 0x7da20c76f3a0>], [<ast.Dict object at 0x7da20c76e1d0>, <ast.Dict object at 0x7da20c76ffa0>]]]]
variable[idx] assign[=] call[name[context]][constant[idx]]
if compare[name[idx] in call[name[instances]][name[self].ST_CREATE]] begin[:]
call[name[self].createUndo, parameter[name[varBind]]]
return[None]
if compare[name[idx] in call[name[instances]][name[self].ST_DESTROY]] begin[:]
call[name[self].destroyUndo, parameter[name[varBind]]]
return[None]
<ast.Try object at 0x7da20c76df00> | keyword[def] identifier[writeUndo] ( identifier[self] , identifier[varBind] ,** identifier[context] ):
literal[string]
identifier[name] , identifier[val] = identifier[varBind]
( identifier[debug] . identifier[logger] & identifier[debug] . identifier[FLAG_INS] keyword[and]
identifier[debug] . identifier[logger] ( literal[string] %( identifier[self] , identifier[name] , identifier[val] )))
identifier[cbFun] = identifier[context] [ literal[string] ]
identifier[instances] = identifier[context] [ literal[string] ]. identifier[setdefault] ( identifier[self] . identifier[name] ,{ identifier[self] . identifier[ST_CREATE] :{}, identifier[self] . identifier[ST_DESTROY] :{}})
identifier[idx] = identifier[context] [ literal[string] ]
keyword[if] identifier[idx] keyword[in] identifier[instances] [ identifier[self] . identifier[ST_CREATE] ]:
identifier[self] . identifier[createUndo] ( identifier[varBind] ,** identifier[context] )
keyword[return]
keyword[if] identifier[idx] keyword[in] identifier[instances] [ identifier[self] . identifier[ST_DESTROY] ]:
identifier[self] . identifier[destroyUndo] ( identifier[varBind] ,** identifier[context] )
keyword[return]
keyword[try] :
identifier[node] = identifier[self] . identifier[getBranch] ( identifier[name] ,** identifier[context] )
keyword[except] ( identifier[error] . identifier[NoSuchInstanceError] , identifier[error] . identifier[NoSuchObjectError] ) keyword[as] identifier[exc] :
identifier[cbFun] ( identifier[varBind] ,** identifier[dict] ( identifier[context] , identifier[error] = identifier[exc] ))
keyword[else] :
identifier[node] . identifier[writeUndo] ( identifier[varBind] ,** identifier[context] ) | def writeUndo(self, varBind, **context):
"""Finalize Managed Object Instance modification.
Implements the third (unsuccessful) step of the multi-step workflow
of the SNMP SET command processing (:RFC:`1905#section-4.2.5`).
The goal of the third phase is to roll the Managed Object Instance
being modified back into its previous state. The system transitions
into the *undo* state whenever any of the simultaneously modified
Managed Objects Instances fail on the *commit* state transitioning.
The role of this object in the MIB tree is non-terminal. It does not
access the actual Managed Object Instance, but just traverses one level
down the MIB tree and hands off the query to the underlying objects.
Parameters
----------
varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing
new Managed Object Instance value to set
Other Parameters
----------------
\\*\\*context:
Query parameters:
* `cbFun` (callable) - user-supplied callable that is invoked to
pass the new value of the Managed Object Instance or an error.
Notes
-----
The callback functions (e.g. `cbFun`) have the same signature as this
method where `varBind` contains the new Managed Object Instance value.
In case of an error, the `error` key in the `context` dict will contain
an exception object.
"""
(name, val) = varBind
debug.logger & debug.FLAG_INS and debug.logger('%s: writeUndo(%s, %r)' % (self, name, val))
cbFun = context['cbFun']
instances = context['instances'].setdefault(self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}})
idx = context['idx']
if idx in instances[self.ST_CREATE]:
self.createUndo(varBind, **context)
return # depends on [control=['if'], data=[]]
if idx in instances[self.ST_DESTROY]:
self.destroyUndo(varBind, **context)
return # depends on [control=['if'], data=[]]
try:
node = self.getBranch(name, **context) # depends on [control=['try'], data=[]]
except (error.NoSuchInstanceError, error.NoSuchObjectError) as exc:
cbFun(varBind, **dict(context, error=exc)) # depends on [control=['except'], data=['exc']]
else:
node.writeUndo(varBind, **context) |
def _py2_crc16(value):
    """Compute the CRC-16 checksum of a value under Python 2.

    :param str value: The value to checksum
    :rtype: int
    """
    crc = 0
    for char in value:
        # Fold each byte into the running CRC via the precomputed table
        index = ((crc >> 8) ^ ord(char)) & 0xff
        crc = ((crc << 8) & 0xffff) ^ _CRC16_LOOKUP[index]
    return crc
constant[Calculate the CRC for the value in Python 2
:param str value: The value to return for the CRC Checksum
:rtype: int
]
variable[crc] assign[=] constant[0]
for taget[name[byte]] in starred[name[value]] begin[:]
variable[crc] assign[=] binary_operation[binary_operation[binary_operation[name[crc] <ast.LShift object at 0x7da2590d69e0> constant[8]] <ast.BitAnd object at 0x7da2590d6b60> constant[65535]] <ast.BitXor object at 0x7da2590d6b00> call[name[_CRC16_LOOKUP]][binary_operation[binary_operation[binary_operation[name[crc] <ast.RShift object at 0x7da2590d6a40> constant[8]] <ast.BitXor object at 0x7da2590d6b00> call[name[ord], parameter[name[byte]]]] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]]
return[name[crc]] | keyword[def] identifier[_py2_crc16] ( identifier[value] ):
literal[string]
identifier[crc] = literal[int]
keyword[for] identifier[byte] keyword[in] identifier[value] :
identifier[crc] =(( identifier[crc] << literal[int] )& literal[int] )^ identifier[_CRC16_LOOKUP] [(( identifier[crc] >> literal[int] )^ identifier[ord] ( identifier[byte] ))& literal[int] ]
keyword[return] identifier[crc] | def _py2_crc16(value):
"""Calculate the CRC for the value in Python 2
:param str value: The value to return for the CRC Checksum
:rtype: int
"""
crc = 0
for byte in value:
crc = crc << 8 & 65535 ^ _CRC16_LOOKUP[(crc >> 8 ^ ord(byte)) & 255] # depends on [control=['for'], data=['byte']]
return crc |
def is_blackout(self) -> bool:
    """Does this alert match a blackout period?"""
    config = current_app.config
    # Severities on the accept list bypass blackouts unless suppression
    # of notifications during blackouts is explicitly enabled.
    if not config['NOTIFICATION_BLACKOUT'] and self.severity in config['BLACKOUT_ACCEPT']:
        return False
    return db.is_blackout_period(self)
constant[Does this alert match a blackout period?]
if <ast.UnaryOp object at 0x7da204963e20> begin[:]
if compare[name[self].severity in call[name[current_app].config][constant[BLACKOUT_ACCEPT]]] begin[:]
return[constant[False]]
return[call[name[db].is_blackout_period, parameter[name[self]]]] | keyword[def] identifier[is_blackout] ( identifier[self] )-> identifier[bool] :
literal[string]
keyword[if] keyword[not] identifier[current_app] . identifier[config] [ literal[string] ]:
keyword[if] identifier[self] . identifier[severity] keyword[in] identifier[current_app] . identifier[config] [ literal[string] ]:
keyword[return] keyword[False]
keyword[return] identifier[db] . identifier[is_blackout_period] ( identifier[self] ) | def is_blackout(self) -> bool:
"""Does this alert match a blackout period?"""
if not current_app.config['NOTIFICATION_BLACKOUT']:
if self.severity in current_app.config['BLACKOUT_ACCEPT']:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return db.is_blackout_period(self) |
def status(reset=None):
    """Show status of indications received.

    If the optional `reset` attribute is truthy, also reset each
    per-host counter to zero after reporting it.
    """
    global RECEIVED_INDICATION_DICT

    for host, count in six.iteritems(RECEIVED_INDICATION_DICT):
        print('Host %s Received %s indications' % (host, count))

    if not reset:
        return

    for host in RECEIVED_INDICATION_DICT:
        RECEIVED_INDICATION_DICT[host] = 0
        print('Host %s Reset: Received %s indications' %
              (host, RECEIVED_INDICATION_DICT[host]))
    print('counts reset to 0')
constant[
Show status of indications received. If optional reset attribute
is True, reset the counter.
]
<ast.Global object at 0x7da1b0ef6770>
for taget[tuple[[<ast.Name object at 0x7da1b0ef5810>, <ast.Name object at 0x7da1b0ef6680>]]] in starred[call[name[six].iteritems, parameter[name[RECEIVED_INDICATION_DICT]]]] begin[:]
call[name[print], parameter[binary_operation[constant[Host %s Received %s indications] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0ef5510>, <ast.Name object at 0x7da1b0ef5600>]]]]]
if name[reset] begin[:]
for taget[name[host]] in starred[name[RECEIVED_INDICATION_DICT]] begin[:]
call[name[RECEIVED_INDICATION_DICT]][name[host]] assign[=] constant[0]
call[name[print], parameter[binary_operation[constant[Host %s Reset: Received %s indications] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0ef52a0>, <ast.Subscript object at 0x7da204344c40>]]]]]
call[name[print], parameter[constant[counts reset to 0]]] | keyword[def] identifier[status] ( identifier[reset] = keyword[None] ):
literal[string]
keyword[global] identifier[RECEIVED_INDICATION_DICT]
keyword[for] identifier[host] , identifier[count] keyword[in] identifier[six] . identifier[iteritems] ( identifier[RECEIVED_INDICATION_DICT] ):
identifier[print] ( literal[string] %( identifier[host] , identifier[count] ))
keyword[if] identifier[reset] :
keyword[for] identifier[host] keyword[in] identifier[RECEIVED_INDICATION_DICT] :
identifier[RECEIVED_INDICATION_DICT] [ identifier[host] ]= literal[int]
identifier[print] ( literal[string] %
( identifier[host] , identifier[RECEIVED_INDICATION_DICT] [ identifier[host] ]))
identifier[print] ( literal[string] ) | def status(reset=None):
"""
Show status of indications received. If optional reset attribute
is True, reset the counter.
"""
global RECEIVED_INDICATION_DICT
for (host, count) in six.iteritems(RECEIVED_INDICATION_DICT):
print('Host %s Received %s indications' % (host, count)) # depends on [control=['for'], data=[]]
if reset:
for host in RECEIVED_INDICATION_DICT:
RECEIVED_INDICATION_DICT[host] = 0
print('Host %s Reset: Received %s indications' % (host, RECEIVED_INDICATION_DICT[host])) # depends on [control=['for'], data=['host']]
print('counts reset to 0') # depends on [control=['if'], data=[]] |
def _forwardImplementation(self, inbuf, outbuf):
    """Pick an action with probability proportional to its action value.

    Writes the index of the chosen action into `outbuf`.
    """
    assert self.module
    action_values = self.module.getActionValues(0)
    # Normalize the propensities into a probability distribution
    probabilities = action_values / sum(action_values)
    chosen = eventGenerator(probabilities)
    outbuf[:] = scipy.array([chosen])
constant[ Proportional probability method.
]
assert[name[self].module]
variable[propensities] assign[=] call[name[self].module.getActionValues, parameter[constant[0]]]
variable[summedProps] assign[=] call[name[sum], parameter[name[propensities]]]
variable[probabilities] assign[=] binary_operation[name[propensities] / name[summedProps]]
variable[action] assign[=] call[name[eventGenerator], parameter[name[probabilities]]]
call[name[outbuf]][<ast.Slice object at 0x7da1b255e140>] assign[=] call[name[scipy].array, parameter[list[[<ast.Name object at 0x7da1b255e260>]]]] | keyword[def] identifier[_forwardImplementation] ( identifier[self] , identifier[inbuf] , identifier[outbuf] ):
literal[string]
keyword[assert] identifier[self] . identifier[module]
identifier[propensities] = identifier[self] . identifier[module] . identifier[getActionValues] ( literal[int] )
identifier[summedProps] = identifier[sum] ( identifier[propensities] )
identifier[probabilities] = identifier[propensities] / identifier[summedProps]
identifier[action] = identifier[eventGenerator] ( identifier[probabilities] )
identifier[outbuf] [:]= identifier[scipy] . identifier[array] ([ identifier[action] ]) | def _forwardImplementation(self, inbuf, outbuf):
""" Proportional probability method.
"""
assert self.module
propensities = self.module.getActionValues(0)
summedProps = sum(propensities)
probabilities = propensities / summedProps
action = eventGenerator(probabilities)
# action = drawIndex(probabilities)
outbuf[:] = scipy.array([action]) |
def checkout(cls, order, **kwargs):
    """Checkout cart.

    Checkout cart, making an order.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.checkout(order, async=True)
    >>> result = thread.get()

    :param async bool
    :param Order order: Required order details. (required)
    :return: Order
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync paths issue the same underlying request;
    # in the async case the helper hands back the request thread.
    return cls._checkout_with_http_info(order, **kwargs)
constant[Checkout cart.
Checkout cart, Making an order.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.checkout(order, async=True)
>>> result = thread.get()
:param async bool
:param Order order: Required order details. (required)
:return: Order
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._checkout_with_http_info, parameter[name[order]]]] | keyword[def] identifier[checkout] ( identifier[cls] , identifier[order] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_checkout_with_http_info] ( identifier[order] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_checkout_with_http_info] ( identifier[order] ,** identifier[kwargs] )
keyword[return] identifier[data] | def checkout(cls, order, **kwargs):
"""Checkout cart.
Checkout cart, Making an order.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.checkout(order, async=True)
>>> result = thread.get()
:param async bool
:param Order order: Required order details. (required)
:return: Order
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._checkout_with_http_info(order, **kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._checkout_with_http_info(order, **kwargs)
return data |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.