| code (string, 75–104k chars) | code_sememe (string, 47–309k chars) | token_type (string, 215–214k chars) | code_dependency (string, 75–155k chars) |
|---|---|---|---|
def check_page(fn):
"Decorator to protect drawing methods"
@wraps(fn)
def wrapper(self, *args, **kwargs):
if not self.page and not kwargs.get('split_only'):
self.error("No page open, you need to call add_page() first")
else:
return fn(self, *args, **kwargs)
return wrapper
|
def function[check_page, parameter[fn]]:
constant[Decorator to protect drawing methods]
def function[wrapper, parameter[self]]:
if <ast.BoolOp object at 0x7da2044c1cc0> begin[:]
call[name[self].error, parameter[constant[No page open, you need to call add_page() first]]]
return[name[wrapper]]
|
keyword[def] identifier[check_page] ( identifier[fn] ):
literal[string]
@ identifier[wraps] ( identifier[fn] )
keyword[def] identifier[wrapper] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
keyword[if] keyword[not] identifier[self] . identifier[page] keyword[and] keyword[not] identifier[kwargs] . identifier[get] ( literal[string] ):
identifier[self] . identifier[error] ( literal[string] )
keyword[else] :
keyword[return] identifier[fn] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper]
|
def check_page(fn):
"""Decorator to protect drawing methods"""
@wraps(fn)
def wrapper(self, *args, **kwargs):
if not self.page and (not kwargs.get('split_only')):
self.error('No page open, you need to call add_page() first') # depends on [control=['if'], data=[]]
else:
return fn(self, *args, **kwargs)
return wrapper
|
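A minimal usage sketch of the check_page decorator above; the Canvas host class here is hypothetical, supplying the page attribute and error method the decorator assumes:

from functools import wraps

def check_page(fn):
    "Decorator to protect drawing methods"
    @wraps(fn)
    def wrapper(self, *args, **kwargs):
        if not self.page and not kwargs.get('split_only'):
            self.error("No page open, you need to call add_page() first")
        else:
            return fn(self, *args, **kwargs)
    return wrapper

class Canvas:
    # Hypothetical host: the decorator relies on 'page' and 'error'.
    def __init__(self):
        self.page = None

    def add_page(self):
        self.page = 1

    def error(self, msg):
        raise RuntimeError(msg)

    @check_page
    def line(self):
        return "drew a line"

c = Canvas()
c.add_page()
print(c.line())  # "drew a line"; calling it before add_page() raises instead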
def HumanReadableStartType(self):
"""Return a human readable string describing the start type value.
Returns:
str: human readable description of the start type value.
"""
if isinstance(self.start_type, py2to3.STRING_TYPES):
return self.start_type
return human_readable_service_enums.SERVICE_ENUMS['Start'].get(
self.start_type, '{0:d}'.format(self.start_type))
|
def function[HumanReadableStartType, parameter[self]]:
constant[Return a human readable string describing the start type value.
Returns:
str: human readable description of the start type value.
]
if call[name[isinstance], parameter[name[self].start_type, name[py2to3].STRING_TYPES]] begin[:]
return[name[self].start_type]
return[call[call[name[human_readable_service_enums].SERVICE_ENUMS][constant[Start]].get, parameter[name[self].start_type, call[constant[{0:d}].format, parameter[name[self].start_type]]]]]
|
keyword[def] identifier[HumanReadableStartType] ( identifier[self] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[start_type] , identifier[py2to3] . identifier[STRING_TYPES] ):
keyword[return] identifier[self] . identifier[start_type]
keyword[return] identifier[human_readable_service_enums] . identifier[SERVICE_ENUMS] [ literal[string] ]. identifier[get] (
identifier[self] . identifier[start_type] , literal[string] . identifier[format] ( identifier[self] . identifier[start_type] ))
|
def HumanReadableStartType(self):
"""Return a human readable string describing the start type value.
Returns:
str: human readable description of the start type value.
"""
if isinstance(self.start_type, py2to3.STRING_TYPES):
return self.start_type # depends on [control=['if'], data=[]]
return human_readable_service_enums.SERVICE_ENUMS['Start'].get(self.start_type, '{0:d}'.format(self.start_type))
|
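A stdlib-only sketch of the lookup-with-formatted-fallback pattern used above; SERVICE_ENUMS here is a hypothetical stand-in for the real enum table:

# Hypothetical stand-in for human_readable_service_enums.SERVICE_ENUMS.
SERVICE_ENUMS = {'Start': {2: 'Automatic', 3: 'On demand', 4: 'Disabled'}}

for start_type in (3, 7):
    # Unknown values fall back to their decimal representation.
    print(SERVICE_ENUMS['Start'].get(start_type, '{0:d}'.format(start_type)))
# On demand
# 7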
def print(self):
"""
    prints the summary statistics to the terminal
"""
print("TOTALS -------------------------------------------")
print(json.dumps(self.counts, indent=4, sort_keys=True))
if self.sub_total:
print("\nSUB TOTALS --- based on '%s' ---------" % self.sub_total)
print(json.dumps(self.sub_counts, indent=4, sort_keys=True))
if self.list_blank:
print("\nMISSING nodes for '%s':" % self.list_blank,
len(self.blank))
|
def function[print, parameter[self]]:
constant[
prints the summary statistics to the terminal
]
call[name[print], parameter[constant[TOTALS -------------------------------------------]]]
call[name[print], parameter[call[name[json].dumps, parameter[name[self].counts]]]]
if name[self].sub_total begin[:]
call[name[print], parameter[binary_operation[constant[
SUB TOTALS --- based on '%s' ---------] <ast.Mod object at 0x7da2590d6920> name[self].sub_total]]]
call[name[print], parameter[call[name[json].dumps, parameter[name[self].sub_counts]]]]
if name[self].list_blank begin[:]
call[name[print], parameter[binary_operation[constant[
MISSING nodes for '%s':] <ast.Mod object at 0x7da2590d6920> name[self].list_blank], call[name[len], parameter[name[self].blank]]]]
|
keyword[def] identifier[print] ( identifier[self] ):
literal[string]
identifier[print] ( literal[string] )
identifier[print] ( identifier[json] . identifier[dumps] ( identifier[self] . identifier[counts] , identifier[indent] = literal[int] , identifier[sort_keys] = keyword[True] ))
keyword[if] identifier[self] . identifier[sub_total] :
identifier[print] ( literal[string] % identifier[self] . identifier[sub_total] )
identifier[print] ( identifier[json] . identifier[dumps] ( identifier[self] . identifier[sub_counts] , identifier[indent] = literal[int] , identifier[sort_keys] = keyword[True] ))
keyword[if] identifier[self] . identifier[list_blank] :
identifier[print] ( literal[string] % identifier[self] . identifier[list_blank] ,
identifier[len] ( identifier[self] . identifier[blank] ))
|
def print(self):
"""
    prints the summary statistics to the terminal
"""
print('TOTALS -------------------------------------------')
print(json.dumps(self.counts, indent=4, sort_keys=True))
if self.sub_total:
print("\nSUB TOTALS --- based on '%s' ---------" % self.sub_total)
print(json.dumps(self.sub_counts, indent=4, sort_keys=True)) # depends on [control=['if'], data=[]]
if self.list_blank:
print("\nMISSING nodes for '%s':" % self.list_blank, len(self.blank)) # depends on [control=['if'], data=[]]
|
def group_by(self, *args):
"""
    Specifies the fields to group by
"""
if len(args) == 1:
self.raw_fields_group = args[0].split(',')
else:
self.raw_fields_group = list(args)
return self
|
def function[group_by, parameter[self]]:
constant[
Specifies the fields to group by
]
if compare[call[name[len], parameter[name[args]]] equal[==] constant[1]] begin[:]
name[self].raw_fields_group assign[=] call[call[name[args]][constant[0]].split, parameter[constant[,]]]
return[name[self]]
|
keyword[def] identifier[group_by] ( identifier[self] ,* identifier[args] ):
literal[string]
keyword[if] identifier[len] ( identifier[args] )== literal[int] :
identifier[self] . identifier[raw_fields_group] = identifier[args] [ literal[int] ]. identifier[split] ( literal[string] )
keyword[else] :
identifier[self] . identifier[raw_fields_group] = identifier[list] ( identifier[args] )
keyword[return] identifier[self]
|
def group_by(self, *args):
"""
    Specifies the fields to group by
"""
if len(args) == 1:
self.raw_fields_group = args[0].split(',') # depends on [control=['if'], data=[]]
else:
self.raw_fields_group = list(args)
return self
|
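A short sketch of the two calling conventions group_by accepts, with a hypothetical minimal query-builder class as the host:

class Query:
    # Hypothetical minimal host for the group_by method above.
    def __init__(self):
        self.raw_fields_group = []

    def group_by(self, *args):
        """Specifies the fields to group by."""
        if len(args) == 1:
            self.raw_fields_group = args[0].split(',')
        else:
            self.raw_fields_group = list(args)
        return self

q = Query()
print(q.group_by('a,b,c').raw_fields_group)   # ['a', 'b', 'c']
print(q.group_by('a', 'b').raw_fields_group)  # ['a', 'b']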
def when(self, case_expr, result_expr):
"""
Add a new case-result pair.
Parameters
----------
    case_expr : Expr
        Expression to equality-compare with base expression. Must be
        comparable with the base.
    result_expr : Expr
Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
case_expr = ir.as_value_expr(case_expr)
result_expr = ir.as_value_expr(result_expr)
if not isinstance(case_expr, ir.BooleanValue):
raise TypeError(case_expr)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
# Maintain immutability
return type(self)(cases, results, self.default)
|
def function[when, parameter[self, case_expr, result_expr]]:
constant[
Add a new case-result pair.
Parameters
----------
case_expr : Expr
    Expression to equality-compare with base expression. Must be
    comparable with the base.
result_expr : Expr
Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
]
variable[case_expr] assign[=] call[name[ir].as_value_expr, parameter[name[case_expr]]]
variable[result_expr] assign[=] call[name[ir].as_value_expr, parameter[name[result_expr]]]
if <ast.UnaryOp object at 0x7da20c7cad40> begin[:]
<ast.Raise object at 0x7da20c7ca440>
variable[cases] assign[=] call[name[list], parameter[name[self].cases]]
call[name[cases].append, parameter[name[case_expr]]]
variable[results] assign[=] call[name[list], parameter[name[self].results]]
call[name[results].append, parameter[name[result_expr]]]
return[call[call[name[type], parameter[name[self]]], parameter[name[cases], name[results], name[self].default]]]
|
keyword[def] identifier[when] ( identifier[self] , identifier[case_expr] , identifier[result_expr] ):
literal[string]
identifier[case_expr] = identifier[ir] . identifier[as_value_expr] ( identifier[case_expr] )
identifier[result_expr] = identifier[ir] . identifier[as_value_expr] ( identifier[result_expr] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[case_expr] , identifier[ir] . identifier[BooleanValue] ):
keyword[raise] identifier[TypeError] ( identifier[case_expr] )
identifier[cases] = identifier[list] ( identifier[self] . identifier[cases] )
identifier[cases] . identifier[append] ( identifier[case_expr] )
identifier[results] = identifier[list] ( identifier[self] . identifier[results] )
identifier[results] . identifier[append] ( identifier[result_expr] )
keyword[return] identifier[type] ( identifier[self] )( identifier[cases] , identifier[results] , identifier[self] . identifier[default] )
|
def when(self, case_expr, result_expr):
"""
Add a new case-result pair.
Parameters
----------
    case_expr : Expr
        Expression to equality-compare with base expression. Must be
        comparable with the base.
    result_expr : Expr
Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
case_expr = ir.as_value_expr(case_expr)
result_expr = ir.as_value_expr(result_expr)
if not isinstance(case_expr, ir.BooleanValue):
raise TypeError(case_expr) # depends on [control=['if'], data=[]]
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
# Maintain immutability
return type(self)(cases, results, self.default)
|
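The part worth isolating is the copy-before-append that keeps the builder immutable. A stdlib-only sketch with a simplified stand-in for the CaseBuilder class (no ir expression checks):

class CaseBuilder:
    # Simplified stand-in: same copy-and-extend pattern, no ibis types.
    def __init__(self, cases=(), results=(), default=None):
        self.cases = list(cases)
        self.results = list(results)
        self.default = default

    def when(self, case, result):
        cases = list(self.cases)
        cases.append(case)
        results = list(self.results)
        results.append(result)
        # Maintain immutability: the receiver is never mutated.
        return type(self)(cases, results, self.default)

b0 = CaseBuilder()
b1 = b0.when(True, 'yes').when(False, 'no')
print(b0.cases, b1.cases)  # [] [True, False]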
def from_points_list(cls, points):
"""
Create a mesh object from a collection of points.
    :param points:
List of :class:`~openquake.hazardlib.geo.point.Point` objects.
:returns:
An instance of :class:`Mesh` with one-dimensional arrays
of coordinates from ``points``.
"""
lons = numpy.zeros(len(points), dtype=float)
lats = lons.copy()
depths = lons.copy()
for i in range(len(points)):
lons[i] = points[i].longitude
lats[i] = points[i].latitude
depths[i] = points[i].depth
if not depths.any():
# all points have zero depth, no need to waste memory
depths = None
return cls(lons, lats, depths)
|
def function[from_points_list, parameter[cls, points]]:
constant[
Create a mesh object from a collection of points.
:param points:
List of :class:`~openquake.hazardlib.geo.point.Point` objects.
:returns:
An instance of :class:`Mesh` with one-dimensional arrays
of coordinates from ``points``.
]
variable[lons] assign[=] call[name[numpy].zeros, parameter[call[name[len], parameter[name[points]]]]]
variable[lats] assign[=] call[name[lons].copy, parameter[]]
variable[depths] assign[=] call[name[lons].copy, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[points]]]]]] begin[:]
call[name[lons]][name[i]] assign[=] call[name[points]][name[i]].longitude
call[name[lats]][name[i]] assign[=] call[name[points]][name[i]].latitude
call[name[depths]][name[i]] assign[=] call[name[points]][name[i]].depth
if <ast.UnaryOp object at 0x7da18bccae00> begin[:]
variable[depths] assign[=] constant[None]
return[call[name[cls], parameter[name[lons], name[lats], name[depths]]]]
|
keyword[def] identifier[from_points_list] ( identifier[cls] , identifier[points] ):
literal[string]
identifier[lons] = identifier[numpy] . identifier[zeros] ( identifier[len] ( identifier[points] ), identifier[dtype] = identifier[float] )
identifier[lats] = identifier[lons] . identifier[copy] ()
identifier[depths] = identifier[lons] . identifier[copy] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[points] )):
identifier[lons] [ identifier[i] ]= identifier[points] [ identifier[i] ]. identifier[longitude]
identifier[lats] [ identifier[i] ]= identifier[points] [ identifier[i] ]. identifier[latitude]
identifier[depths] [ identifier[i] ]= identifier[points] [ identifier[i] ]. identifier[depth]
keyword[if] keyword[not] identifier[depths] . identifier[any] ():
identifier[depths] = keyword[None]
keyword[return] identifier[cls] ( identifier[lons] , identifier[lats] , identifier[depths] )
|
def from_points_list(cls, points):
"""
Create a mesh object from a collection of points.
    :param points:
List of :class:`~openquake.hazardlib.geo.point.Point` objects.
:returns:
An instance of :class:`Mesh` with one-dimensional arrays
of coordinates from ``points``.
"""
lons = numpy.zeros(len(points), dtype=float)
lats = lons.copy()
depths = lons.copy()
for i in range(len(points)):
lons[i] = points[i].longitude
lats[i] = points[i].latitude
depths[i] = points[i].depth # depends on [control=['for'], data=['i']]
if not depths.any():
# all points have zero depth, no need to waste memory
depths = None # depends on [control=['if'], data=[]]
return cls(lons, lats, depths)
|
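A hedged sketch of the loop body above, with a namedtuple standing in for openquake's Point class; it also shows the all-zero-depth case that makes the method drop the depths array:

from collections import namedtuple
import numpy

# Hypothetical stand-in for openquake.hazardlib.geo.point.Point.
Point = namedtuple('Point', 'longitude latitude depth')

points = [Point(0.0, 45.0, 0.0), Point(0.5, 45.5, 0.0)]
lons = numpy.zeros(len(points), dtype=float)
lats = lons.copy()
depths = lons.copy()
for i in range(len(points)):
    lons[i] = points[i].longitude
    lats[i] = points[i].latitude
    depths[i] = points[i].depth
print(depths.any())  # False -> from_points_list would pass depths=None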
def doc(self):
"""
Return the root Doc element (if there is one)
"""
guess = self
while guess is not None and guess.tag != 'Doc':
guess = guess.parent # If no parent, this will be None
return guess
|
def function[doc, parameter[self]]:
constant[
Return the root Doc element (if there is one)
]
variable[guess] assign[=] name[self]
while <ast.BoolOp object at 0x7da18dc992d0> begin[:]
variable[guess] assign[=] name[guess].parent
return[name[guess]]
|
keyword[def] identifier[doc] ( identifier[self] ):
literal[string]
identifier[guess] = identifier[self]
keyword[while] identifier[guess] keyword[is] keyword[not] keyword[None] keyword[and] identifier[guess] . identifier[tag] != literal[string] :
identifier[guess] = identifier[guess] . identifier[parent]
keyword[return] identifier[guess]
|
def doc(self):
"""
Return the root Doc element (if there is one)
"""
guess = self
while guess is not None and guess.tag != 'Doc':
guess = guess.parent # If no parent, this will be None # depends on [control=['while'], data=[]]
return guess
|
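A self-contained sketch of the parent-chain walk with a hypothetical minimal node class:

class Node:
    # Hypothetical minimal element exposing 'tag' and 'parent'.
    def __init__(self, tag, parent=None):
        self.tag = tag
        self.parent = parent

    def doc(self):
        guess = self
        while guess is not None and guess.tag != 'Doc':
            guess = guess.parent  # If no parent, this will be None
        return guess

root = Node('Doc')
leaf = Node('Span', parent=Node('Para', parent=root))
print(leaf.doc() is root)    # True
print(Node('Orphan').doc())  # None -- no Doc ancestor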
def simple_lmm(snps,pheno,K=None,covs=None, test='lrt',NumIntervalsDelta0=100,NumIntervalsDeltaAlt=0,searchDelta=False):
"""
Univariate fixed effects linear mixed model test for all SNPs
Args:
snps: [N x S] SP.array of S SNPs for N individuals
pheno: [N x 1] SP.array of 1 phenotype for N individuals
        K: [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
If not provided, then linear regression analysis is performed
covs: [N x D] SP.array of D covariates for N individuals
test: 'lrt' for likelihood ratio test (default) or 'f' for F-test
NumIntervalsDelta0: number of steps for delta optimization on the null model (100)
        NumIntervalsDeltaAlt: number of steps for delta optimization on the alt. model (0 - no optimization)
        searchDelta: carry out delta optimization on the alternative model? If yes, NumIntervalsDeltaAlt steps are used
Returns:
limix LMM object
"""
t0=time.time()
if K is None:
K=SP.eye(snps.shape[0])
lm = limix.CLMM()
lm.setK(K)
lm.setSNPs(snps)
lm.setPheno(pheno)
if covs is None:
covs = SP.ones((snps.shape[0],1))
lm.setCovs(covs)
if test=='lrt':
lm.setTestStatistics(0)
elif test=='f':
lm.setTestStatistics(1)
else:
print(test)
raise NotImplementedError("only f or lrt are implemented")
#set number of delta grid optimizations?
lm.setNumIntervals0(NumIntervalsDelta0)
if searchDelta:
lm.setNumIntervalsAlt(NumIntervalsDeltaAlt)
else:
lm.setNumIntervalsAlt(0)
lm.process()
t1=time.time()
print(("finished GWAS testing in %.2f seconds" %(t1-t0)))
return lm
|
def function[simple_lmm, parameter[snps, pheno, K, covs, test, NumIntervalsDelta0, NumIntervalsDeltaAlt, searchDelta]]:
constant[
Univariate fixed effects linear mixed model test for all SNPs
Args:
snps: [N x S] SP.array of S SNPs for N individuals
pheno: [N x 1] SP.array of 1 phenotype for N individuals
K: [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
If not provided, then linear regression analysis is performed
covs: [N x D] SP.array of D covariates for N individuals
test: 'lrt' for likelihood ratio test (default) or 'f' for F-test
NumIntervalsDelta0: number of steps for delta optimization on the null model (100)
NumIntervalsDeltaAlt: number of steps for delta optimization on the alt. model (0 - no optimization)
searchDelta: carry out delta optimization on the alternative model? If yes, NumIntervalsDeltaAlt steps are used
Returns:
limix LMM object
]
variable[t0] assign[=] call[name[time].time, parameter[]]
if compare[name[K] is constant[None]] begin[:]
variable[K] assign[=] call[name[SP].eye, parameter[call[name[snps].shape][constant[0]]]]
variable[lm] assign[=] call[name[limix].CLMM, parameter[]]
call[name[lm].setK, parameter[name[K]]]
call[name[lm].setSNPs, parameter[name[snps]]]
call[name[lm].setPheno, parameter[name[pheno]]]
if compare[name[covs] is constant[None]] begin[:]
variable[covs] assign[=] call[name[SP].ones, parameter[tuple[[<ast.Subscript object at 0x7da2054a4b20>, <ast.Constant object at 0x7da2054a64a0>]]]]
call[name[lm].setCovs, parameter[name[covs]]]
if compare[name[test] equal[==] constant[lrt]] begin[:]
call[name[lm].setTestStatistics, parameter[constant[0]]]
call[name[lm].setNumIntervals0, parameter[name[NumIntervalsDelta0]]]
if name[searchDelta] begin[:]
call[name[lm].setNumIntervalsAlt, parameter[name[NumIntervalsDeltaAlt]]]
call[name[lm].process, parameter[]]
variable[t1] assign[=] call[name[time].time, parameter[]]
call[name[print], parameter[binary_operation[constant[finished GWAS testing in %.2f seconds] <ast.Mod object at 0x7da2590d6920> binary_operation[name[t1] - name[t0]]]]]
return[name[lm]]
|
keyword[def] identifier[simple_lmm] ( identifier[snps] , identifier[pheno] , identifier[K] = keyword[None] , identifier[covs] = keyword[None] , identifier[test] = literal[string] , identifier[NumIntervalsDelta0] = literal[int] , identifier[NumIntervalsDeltaAlt] = literal[int] , identifier[searchDelta] = keyword[False] ):
literal[string]
identifier[t0] = identifier[time] . identifier[time] ()
keyword[if] identifier[K] keyword[is] keyword[None] :
identifier[K] = identifier[SP] . identifier[eye] ( identifier[snps] . identifier[shape] [ literal[int] ])
identifier[lm] = identifier[limix] . identifier[CLMM] ()
identifier[lm] . identifier[setK] ( identifier[K] )
identifier[lm] . identifier[setSNPs] ( identifier[snps] )
identifier[lm] . identifier[setPheno] ( identifier[pheno] )
keyword[if] identifier[covs] keyword[is] keyword[None] :
identifier[covs] = identifier[SP] . identifier[ones] (( identifier[snps] . identifier[shape] [ literal[int] ], literal[int] ))
identifier[lm] . identifier[setCovs] ( identifier[covs] )
keyword[if] identifier[test] == literal[string] :
identifier[lm] . identifier[setTestStatistics] ( literal[int] )
keyword[elif] identifier[test] == literal[string] :
identifier[lm] . identifier[setTestStatistics] ( literal[int] )
keyword[else] :
identifier[print] ( identifier[test] )
keyword[raise] identifier[NotImplementedError] ( literal[string] )
identifier[lm] . identifier[setNumIntervals0] ( identifier[NumIntervalsDelta0] )
keyword[if] identifier[searchDelta] :
identifier[lm] . identifier[setNumIntervalsAlt] ( identifier[NumIntervalsDeltaAlt] )
keyword[else] :
identifier[lm] . identifier[setNumIntervalsAlt] ( literal[int] )
identifier[lm] . identifier[process] ()
identifier[t1] = identifier[time] . identifier[time] ()
identifier[print] (( literal[string] %( identifier[t1] - identifier[t0] )))
keyword[return] identifier[lm]
|
def simple_lmm(snps, pheno, K=None, covs=None, test='lrt', NumIntervalsDelta0=100, NumIntervalsDeltaAlt=0, searchDelta=False):
"""
Univariate fixed effects linear mixed model test for all SNPs
Args:
snps: [N x S] SP.array of S SNPs for N individuals
pheno: [N x 1] SP.array of 1 phenotype for N individuals
        K: [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
If not provided, then linear regression analysis is performed
covs: [N x D] SP.array of D covariates for N individuals
test: 'lrt' for likelihood ratio test (default) or 'f' for F-test
NumIntervalsDelta0: number of steps for delta optimization on the null model (100)
        NumIntervalsDeltaAlt: number of steps for delta optimization on the alt. model (0 - no optimization)
        searchDelta: carry out delta optimization on the alternative model? If yes, NumIntervalsDeltaAlt steps are used
Returns:
limix LMM object
"""
t0 = time.time()
if K is None:
K = SP.eye(snps.shape[0]) # depends on [control=['if'], data=['K']]
lm = limix.CLMM()
lm.setK(K)
lm.setSNPs(snps)
lm.setPheno(pheno)
if covs is None:
covs = SP.ones((snps.shape[0], 1)) # depends on [control=['if'], data=['covs']]
lm.setCovs(covs)
if test == 'lrt':
lm.setTestStatistics(0) # depends on [control=['if'], data=[]]
elif test == 'f':
lm.setTestStatistics(1) # depends on [control=['if'], data=[]]
else:
print(test)
raise NotImplementedError('only f or lrt are implemented')
#set number of delta grid optimizations?
lm.setNumIntervals0(NumIntervalsDelta0)
if searchDelta:
lm.setNumIntervalsAlt(NumIntervalsDeltaAlt) # depends on [control=['if'], data=[]]
else:
lm.setNumIntervalsAlt(0)
lm.process()
t1 = time.time()
print('finished GWAS testing in %.2f seconds' % (t1 - t0))
return lm
|
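A hedged usage sketch, assuming limix is installed and that SP in the function body is scipy/numpy; the random data and shapes below just exercise the documented signature:

import numpy as np

N, S = 100, 50
snps = np.random.binomial(2, 0.3, size=(N, S)).astype(float)  # [N x S]
pheno = np.random.randn(N, 1)                                 # [N x 1]

# With K omitted, the function substitutes the identity matrix, i.e. plain
# linear regression rather than a mixed model.
lm = simple_lmm(snps, pheno, test='lrt')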
def load_and_print_resfile(filename, info_dict=None):
"""Load a raw data file and print information.
Args:
filename (str): name of the resfile.
        info_dict (dict): optional dict with keys mass, nom_cap and tot_mass.
    Returns:
        info_dict (dict): the info dict, populated with defaults if none was given.
"""
# self.test_no = None
# self.mass = 1.0 # mass of (active) material (in mg)
# self.no_cycles = 0.0
# self.charge_steps = None # not in use at the moment
# self.discharge_steps = None # not in use at the moment
# self.ir_steps = None # dict # not in use at the moment
# self.ocv_steps = None # dict # not in use at the moment
# self.nom_cap = 3579 # mAh/g (used for finding c-rates)
# self.mass_given = False
# self.c_mode = True
# self.starts_with = "discharge"
# self.material = "noname"
# self.merged = False
# self.file_errors = None # not in use at the moment
# self.loaded_from = None # name of the .res file it is loaded from
# (can be list if merged)
# self.raw_data_files = []
# self.raw_data_files_length = []
# # self.parent_filename = None # name of the .res file it is loaded from
    # (basename) (can be list if merged)
# # self.parent_filename = if listtype, for file in etc,,,
# os.path.basename(self.loaded_from)
# self.channel_index = None
# self.channel_number = None
# self.creator = None
# self.item_ID = None
# self.schedule_file_name = None
# self.start_datetime = None
# self.test_ID = None
# self.name = None
# NEXT: include nom_cap, tot_mass and parameters table in save/load hdf5
if info_dict is None:
info_dict = dict()
info_dict["mass"] = 1.23 # mg
info_dict["nom_cap"] = 3600 # mAh/g (active material)
info_dict["tot_mass"] = 2.33 # mAh/g (total mass of material)
d = CellpyData()
print("filename:", filename)
print("info_dict in:", end=' ')
print(info_dict)
d.from_raw(filename)
d.set_mass(info_dict["mass"])
d.make_step_table()
d.make_summary()
for test in d.datasets:
print("newtest")
print(test)
return info_dict
|
def function[load_and_print_resfile, parameter[filename, info_dict]]:
constant[Load a raw data file and print information.
Args:
filename (str): name of the resfile.
info_dict (dict): optional dict with keys mass, nom_cap and tot_mass.
Returns:
info_dict (dict): the info dict, populated with defaults if none was given.
]
if compare[name[info_dict] is constant[None]] begin[:]
variable[info_dict] assign[=] call[name[dict], parameter[]]
call[name[info_dict]][constant[mass]] assign[=] constant[1.23]
call[name[info_dict]][constant[nom_cap]] assign[=] constant[3600]
call[name[info_dict]][constant[tot_mass]] assign[=] constant[2.33]
variable[d] assign[=] call[name[CellpyData], parameter[]]
call[name[print], parameter[constant[filename:], name[filename]]]
call[name[print], parameter[constant[info_dict in:]]]
call[name[print], parameter[name[info_dict]]]
call[name[d].from_raw, parameter[name[filename]]]
call[name[d].set_mass, parameter[call[name[info_dict]][constant[mass]]]]
call[name[d].make_step_table, parameter[]]
call[name[d].make_summary, parameter[]]
for taget[name[test]] in starred[name[d].datasets] begin[:]
call[name[print], parameter[constant[newtest]]]
call[name[print], parameter[name[test]]]
return[name[info_dict]]
|
keyword[def] identifier[load_and_print_resfile] ( identifier[filename] , identifier[info_dict] = keyword[None] ):
literal[string]
keyword[if] identifier[info_dict] keyword[is] keyword[None] :
identifier[info_dict] = identifier[dict] ()
identifier[info_dict] [ literal[string] ]= literal[int]
identifier[info_dict] [ literal[string] ]= literal[int]
identifier[info_dict] [ literal[string] ]= literal[int]
identifier[d] = identifier[CellpyData] ()
identifier[print] ( literal[string] , identifier[filename] )
identifier[print] ( literal[string] , identifier[end] = literal[string] )
identifier[print] ( identifier[info_dict] )
identifier[d] . identifier[from_raw] ( identifier[filename] )
identifier[d] . identifier[set_mass] ( identifier[info_dict] [ literal[string] ])
identifier[d] . identifier[make_step_table] ()
identifier[d] . identifier[make_summary] ()
keyword[for] identifier[test] keyword[in] identifier[d] . identifier[datasets] :
identifier[print] ( literal[string] )
identifier[print] ( identifier[test] )
keyword[return] identifier[info_dict]
|
def load_and_print_resfile(filename, info_dict=None):
"""Load a raw data file and print information.
Args:
filename (str): name of the resfile.
        info_dict (dict): optional dict with keys mass, nom_cap and tot_mass.
    Returns:
        info_dict (dict): the info dict, populated with defaults if none was given.
"""
# self.test_no = None
# self.mass = 1.0 # mass of (active) material (in mg)
# self.no_cycles = 0.0
# self.charge_steps = None # not in use at the moment
# self.discharge_steps = None # not in use at the moment
# self.ir_steps = None # dict # not in use at the moment
# self.ocv_steps = None # dict # not in use at the moment
# self.nom_cap = 3579 # mAh/g (used for finding c-rates)
# self.mass_given = False
# self.c_mode = True
# self.starts_with = "discharge"
# self.material = "noname"
# self.merged = False
# self.file_errors = None # not in use at the moment
# self.loaded_from = None # name of the .res file it is loaded from
# (can be list if merged)
# self.raw_data_files = []
# self.raw_data_files_length = []
# # self.parent_filename = None # name of the .res file it is loaded from
    # (basename) (can be list if merged)
# # self.parent_filename = if listtype, for file in etc,,,
# os.path.basename(self.loaded_from)
# self.channel_index = None
# self.channel_number = None
# self.creator = None
# self.item_ID = None
# self.schedule_file_name = None
# self.start_datetime = None
# self.test_ID = None
# self.name = None
# NEXT: include nom_cap, tot_mass and parameters table in save/load hdf5
if info_dict is None:
info_dict = dict()
info_dict['mass'] = 1.23 # mg
info_dict['nom_cap'] = 3600 # mAh/g (active material)
        info_dict['tot_mass'] = 2.33  # mg (total mass of material) # depends on [control=['if'], data=['info_dict']]
d = CellpyData()
print('filename:', filename)
print('info_dict in:', end=' ')
print(info_dict)
d.from_raw(filename)
d.set_mass(info_dict['mass'])
d.make_step_table()
d.make_summary()
for test in d.datasets:
print('newtest')
print(test) # depends on [control=['for'], data=['test']]
return info_dict
|
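A hedged usage sketch, assuming cellpy's CellpyData is importable and a .res file exists at the (placeholder) path below:

info = {"mass": 0.86, "nom_cap": 3579, "tot_mass": 1.42}  # mg, mAh/g, mg
result = load_and_print_resfile("20180101_cell01.res", info_dict=info)
print(result is info)  # True -- the same dict is passed back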
def doc2md(docstr, title, min_level=1, more_info=False, toc=True, maxdepth=0):
"""
Convert a docstring to a markdown text.
"""
text = doctrim(docstr)
lines = text.split('\n')
sections = find_sections(lines)
if sections:
level = min(n for n,t in sections) - 1
else:
level = 1
shiftlevel = 0
if level < min_level:
shiftlevel = min_level - level
level = min_level
sections = [(lev+shiftlevel, tit) for lev,tit in sections]
head = next((i for i, l in enumerate(lines) if is_heading(l)), 0)
md = [
make_heading(level, title),
"",
] + lines[:head]
if toc:
md += make_toc(sections, maxdepth)
md += ['']
md += _doc2md(lines[head:], shiftlevel)
if more_info:
return (md, sections)
else:
return "\n".join(md)
|
def function[doc2md, parameter[docstr, title, min_level, more_info, toc, maxdepth]]:
constant[
Convert a docstring to a markdown text.
]
variable[text] assign[=] call[name[doctrim], parameter[name[docstr]]]
variable[lines] assign[=] call[name[text].split, parameter[constant[
]]]
variable[sections] assign[=] call[name[find_sections], parameter[name[lines]]]
if name[sections] begin[:]
variable[level] assign[=] binary_operation[call[name[min], parameter[<ast.GeneratorExp object at 0x7da1b00145e0>]] - constant[1]]
variable[shiftlevel] assign[=] constant[0]
if compare[name[level] less[<] name[min_level]] begin[:]
variable[shiftlevel] assign[=] binary_operation[name[min_level] - name[level]]
variable[level] assign[=] name[min_level]
variable[sections] assign[=] <ast.ListComp object at 0x7da1b012c5e0>
variable[head] assign[=] call[name[next], parameter[<ast.GeneratorExp object at 0x7da1b012d5a0>, constant[0]]]
variable[md] assign[=] binary_operation[list[[<ast.Call object at 0x7da1b012d690>, <ast.Constant object at 0x7da1b012e5c0>]] + call[name[lines]][<ast.Slice object at 0x7da1b012e740>]]
if name[toc] begin[:]
<ast.AugAssign object at 0x7da1b012d3c0>
<ast.AugAssign object at 0x7da1b012faf0>
<ast.AugAssign object at 0x7da1b0088940>
if name[more_info] begin[:]
return[tuple[[<ast.Name object at 0x7da1b008afb0>, <ast.Name object at 0x7da1b008aef0>]]]
|
keyword[def] identifier[doc2md] ( identifier[docstr] , identifier[title] , identifier[min_level] = literal[int] , identifier[more_info] = keyword[False] , identifier[toc] = keyword[True] , identifier[maxdepth] = literal[int] ):
literal[string]
identifier[text] = identifier[doctrim] ( identifier[docstr] )
identifier[lines] = identifier[text] . identifier[split] ( literal[string] )
identifier[sections] = identifier[find_sections] ( identifier[lines] )
keyword[if] identifier[sections] :
identifier[level] = identifier[min] ( identifier[n] keyword[for] identifier[n] , identifier[t] keyword[in] identifier[sections] )- literal[int]
keyword[else] :
identifier[level] = literal[int]
identifier[shiftlevel] = literal[int]
keyword[if] identifier[level] < identifier[min_level] :
identifier[shiftlevel] = identifier[min_level] - identifier[level]
identifier[level] = identifier[min_level]
identifier[sections] =[( identifier[lev] + identifier[shiftlevel] , identifier[tit] ) keyword[for] identifier[lev] , identifier[tit] keyword[in] identifier[sections] ]
identifier[head] = identifier[next] (( identifier[i] keyword[for] identifier[i] , identifier[l] keyword[in] identifier[enumerate] ( identifier[lines] ) keyword[if] identifier[is_heading] ( identifier[l] )), literal[int] )
identifier[md] =[
identifier[make_heading] ( identifier[level] , identifier[title] ),
literal[string] ,
]+ identifier[lines] [: identifier[head] ]
keyword[if] identifier[toc] :
identifier[md] += identifier[make_toc] ( identifier[sections] , identifier[maxdepth] )
identifier[md] +=[ literal[string] ]
identifier[md] += identifier[_doc2md] ( identifier[lines] [ identifier[head] :], identifier[shiftlevel] )
keyword[if] identifier[more_info] :
keyword[return] ( identifier[md] , identifier[sections] )
keyword[else] :
keyword[return] literal[string] . identifier[join] ( identifier[md] )
|
def doc2md(docstr, title, min_level=1, more_info=False, toc=True, maxdepth=0):
"""
Convert a docstring to a markdown text.
"""
text = doctrim(docstr)
lines = text.split('\n')
sections = find_sections(lines)
if sections:
level = min((n for (n, t) in sections)) - 1 # depends on [control=['if'], data=[]]
else:
level = 1
shiftlevel = 0
if level < min_level:
shiftlevel = min_level - level
level = min_level
sections = [(lev + shiftlevel, tit) for (lev, tit) in sections] # depends on [control=['if'], data=['level', 'min_level']]
head = next((i for (i, l) in enumerate(lines) if is_heading(l)), 0)
md = [make_heading(level, title), ''] + lines[:head]
if toc:
md += make_toc(sections, maxdepth)
md += [''] # depends on [control=['if'], data=[]]
md += _doc2md(lines[head:], shiftlevel)
if more_info:
return (md, sections) # depends on [control=['if'], data=[]]
else:
return '\n'.join(md)
|
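A hedged usage sketch, assuming doc2md and its helpers (doctrim, find_sections, make_toc, _doc2md) come from the same module:

docstring = """
Frobnicate the widgets.

This paragraph survives into the markdown body unchanged.
"""
# Renders a markdown page titled 'frobnicate'; with no detected sections,
# min_level=2 shifts any later headings down to level 2 or deeper.
print(doc2md(docstring, 'frobnicate', min_level=2, toc=False))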
def autodiscover():
"""
Perform an autodiscover of an api.py file in the installed apps to
generate the routes of the registered viewsets.
"""
for app in settings.INSTALLED_APPS:
try:
import_module('.'.join((app, 'api')))
except ImportError as e:
if e.msg != "No module named '{0}.api'".format(app):
print(e.msg)
|
def function[autodiscover, parameter[]]:
constant[
Perform an autodiscover of an api.py file in the installed apps to
generate the routes of the registered viewsets.
]
for taget[name[app]] in starred[name[settings].INSTALLED_APPS] begin[:]
<ast.Try object at 0x7da20c6a85e0>
|
keyword[def] identifier[autodiscover] ():
literal[string]
keyword[for] identifier[app] keyword[in] identifier[settings] . identifier[INSTALLED_APPS] :
keyword[try] :
identifier[import_module] ( literal[string] . identifier[join] (( identifier[app] , literal[string] )))
keyword[except] identifier[ImportError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[msg] != literal[string] . identifier[format] ( identifier[app] ):
identifier[print] ( identifier[e] . identifier[msg] )
|
def autodiscover():
"""
Perform an autodiscover of an api.py file in the installed apps to
generate the routes of the registered viewsets.
"""
for app in settings.INSTALLED_APPS:
try:
import_module('.'.join((app, 'api'))) # depends on [control=['try'], data=[]]
except ImportError as e:
if e.msg != "No module named '{0}.api'".format(app):
print(e.msg) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['app']]
|
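A self-contained check of the ImportError message that the suppression logic above compares against (email is just a stdlib package known to have no api submodule):

from importlib import import_module

app = 'email'  # stdlib package with no .api submodule
try:
    import_module('.'.join((app, 'api')))
except ImportError as e:
    print(e.msg)  # No module named 'email.api' -> autodiscover stays silent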
def version():
'''
Return the system version for this minion
.. versionchanged:: 2016.11.4
Added support for AIX
.. versionchanged:: 2018.3.0
Added support for OpenBSD
CLI Example:
.. code-block:: bash
salt '*' status.version
'''
def linux_version():
'''
linux specific implementation of version
'''
try:
with salt.utils.files.fopen('/proc/version', 'r') as fp_:
return salt.utils.stringutils.to_unicode(fp_.read()).strip()
except IOError:
return {}
def bsd_version():
'''
bsd specific implementation of version
'''
return __salt__['cmd.run']('sysctl -n kern.version')
# dict that returns a function that does the right thing per platform
get_version = {
'Linux': linux_version,
'FreeBSD': bsd_version,
'OpenBSD': bsd_version,
'AIX': lambda: __salt__['cmd.run']('oslevel -s'),
}
errmsg = 'This method is unsupported on the current operating system!'
return get_version.get(__grains__['kernel'], lambda: errmsg)()
|
def function[version, parameter[]]:
constant[
Return the system version for this minion
.. versionchanged:: 2016.11.4
Added support for AIX
.. versionchanged:: 2018.3.0
Added support for OpenBSD
CLI Example:
.. code-block:: bash
salt '*' status.version
]
def function[linux_version, parameter[]]:
constant[
linux specific implementation of version
]
<ast.Try object at 0x7da2054a4760>
def function[bsd_version, parameter[]]:
constant[
bsd specific implementation of version
]
return[call[call[name[__salt__]][constant[cmd.run]], parameter[constant[sysctl -n kern.version]]]]
variable[get_version] assign[=] dictionary[[<ast.Constant object at 0x7da18dc062c0>, <ast.Constant object at 0x7da18dc07940>, <ast.Constant object at 0x7da18dc06ec0>, <ast.Constant object at 0x7da18dc05660>], [<ast.Name object at 0x7da18dc05bd0>, <ast.Name object at 0x7da18dc04eb0>, <ast.Name object at 0x7da18dc062f0>, <ast.Lambda object at 0x7da18dc05cf0>]]
variable[errmsg] assign[=] constant[This method is unsupported on the current operating system!]
return[call[call[name[get_version].get, parameter[call[name[__grains__]][constant[kernel]], <ast.Lambda object at 0x7da18dc04550>]], parameter[]]]
|
keyword[def] identifier[version] ():
literal[string]
keyword[def] identifier[linux_version] ():
literal[string]
keyword[try] :
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( literal[string] , literal[string] ) keyword[as] identifier[fp_] :
keyword[return] identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[fp_] . identifier[read] ()). identifier[strip] ()
keyword[except] identifier[IOError] :
keyword[return] {}
keyword[def] identifier[bsd_version] ():
literal[string]
keyword[return] identifier[__salt__] [ literal[string] ]( literal[string] )
identifier[get_version] ={
literal[string] : identifier[linux_version] ,
literal[string] : identifier[bsd_version] ,
literal[string] : identifier[bsd_version] ,
literal[string] : keyword[lambda] : identifier[__salt__] [ literal[string] ]( literal[string] ),
}
identifier[errmsg] = literal[string]
keyword[return] identifier[get_version] . identifier[get] ( identifier[__grains__] [ literal[string] ], keyword[lambda] : identifier[errmsg] )()
|
def version():
"""
Return the system version for this minion
.. versionchanged:: 2016.11.4
Added support for AIX
.. versionchanged:: 2018.3.0
Added support for OpenBSD
CLI Example:
.. code-block:: bash
salt '*' status.version
"""
def linux_version():
"""
linux specific implementation of version
"""
try:
with salt.utils.files.fopen('/proc/version', 'r') as fp_:
return salt.utils.stringutils.to_unicode(fp_.read()).strip() # depends on [control=['with'], data=['fp_']] # depends on [control=['try'], data=[]]
except IOError:
return {} # depends on [control=['except'], data=[]]
def bsd_version():
"""
bsd specific implementation of version
"""
return __salt__['cmd.run']('sysctl -n kern.version')
# dict that returns a function that does the right thing per platform
get_version = {'Linux': linux_version, 'FreeBSD': bsd_version, 'OpenBSD': bsd_version, 'AIX': lambda : __salt__['cmd.run']('oslevel -s')}
errmsg = 'This method is unsupported on the current operating system!'
return get_version.get(__grains__['kernel'], lambda : errmsg)()
|
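The per-kernel dispatch dict is the reusable idea here: dict.get with a fallback lambda replaces an if/elif ladder. A stdlib-only sketch, with platform.system() standing in for __grains__['kernel']:

import platform

def linux_version():
    try:
        with open('/proc/version') as fp_:
            return fp_.read().strip()
    except IOError:
        return {}

get_version = {
    'Linux': linux_version,
    # BSD/AIX entries would map to their own callables here.
}
errmsg = 'This method is unsupported on the current operating system!'
print(get_version.get(platform.system(), lambda: errmsg)())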
def approve_announcement_view(request, req_id):
"""The approve announcement page. Teachers will be linked to this page from an email.
req_id: The ID of the AnnouncementRequest
"""
req = get_object_or_404(AnnouncementRequest, id=req_id)
requested_teachers = req.teachers_requested.all()
logger.debug(requested_teachers)
if request.user not in requested_teachers:
messages.error(request, "You do not have permission to approve this announcement.")
return redirect("index")
if request.method == "POST":
form = AnnouncementRequestForm(request.POST, instance=req)
if form.is_valid():
obj = form.save(commit=True)
# SAFE HTML
obj.content = safe_html(obj.content)
obj.save()
if "approve" in request.POST:
obj.teachers_approved.add(request.user)
obj.save()
if not obj.admin_email_sent:
if settings.SEND_ANNOUNCEMENT_APPROVAL:
admin_request_announcement_email(request, form, obj)
obj.admin_email_sent = True
obj.save()
return redirect("approve_announcement_success")
else:
obj.save()
return redirect("approve_announcement_reject")
form = AnnouncementRequestForm(instance=req)
context = {"form": form, "req": req, "admin_approve": False}
return render(request, "announcements/approve.html", context)
|
def function[approve_announcement_view, parameter[request, req_id]]:
constant[The approve announcement page. Teachers will be linked to this page from an email.
req_id: The ID of the AnnouncementRequest
]
variable[req] assign[=] call[name[get_object_or_404], parameter[name[AnnouncementRequest]]]
variable[requested_teachers] assign[=] call[name[req].teachers_requested.all, parameter[]]
call[name[logger].debug, parameter[name[requested_teachers]]]
if compare[name[request].user <ast.NotIn object at 0x7da2590d7190> name[requested_teachers]] begin[:]
call[name[messages].error, parameter[name[request], constant[You do not have permission to approve this announcement.]]]
return[call[name[redirect], parameter[constant[index]]]]
if compare[name[request].method equal[==] constant[POST]] begin[:]
variable[form] assign[=] call[name[AnnouncementRequestForm], parameter[name[request].POST]]
if call[name[form].is_valid, parameter[]] begin[:]
variable[obj] assign[=] call[name[form].save, parameter[]]
name[obj].content assign[=] call[name[safe_html], parameter[name[obj].content]]
call[name[obj].save, parameter[]]
if compare[constant[approve] in name[request].POST] begin[:]
call[name[obj].teachers_approved.add, parameter[name[request].user]]
call[name[obj].save, parameter[]]
if <ast.UnaryOp object at 0x7da1b0430910> begin[:]
if name[settings].SEND_ANNOUNCEMENT_APPROVAL begin[:]
call[name[admin_request_announcement_email], parameter[name[request], name[form], name[obj]]]
name[obj].admin_email_sent assign[=] constant[True]
call[name[obj].save, parameter[]]
return[call[name[redirect], parameter[constant[approve_announcement_success]]]]
variable[form] assign[=] call[name[AnnouncementRequestForm], parameter[]]
variable[context] assign[=] dictionary[[<ast.Constant object at 0x7da1b0431a80>, <ast.Constant object at 0x7da1b0432a40>, <ast.Constant object at 0x7da1b0432a70>], [<ast.Name object at 0x7da1b0433070>, <ast.Name object at 0x7da1b04330d0>, <ast.Constant object at 0x7da1b0430e50>]]
return[call[name[render], parameter[name[request], constant[announcements/approve.html], name[context]]]]
|
keyword[def] identifier[approve_announcement_view] ( identifier[request] , identifier[req_id] ):
literal[string]
identifier[req] = identifier[get_object_or_404] ( identifier[AnnouncementRequest] , identifier[id] = identifier[req_id] )
identifier[requested_teachers] = identifier[req] . identifier[teachers_requested] . identifier[all] ()
identifier[logger] . identifier[debug] ( identifier[requested_teachers] )
keyword[if] identifier[request] . identifier[user] keyword[not] keyword[in] identifier[requested_teachers] :
identifier[messages] . identifier[error] ( identifier[request] , literal[string] )
keyword[return] identifier[redirect] ( literal[string] )
keyword[if] identifier[request] . identifier[method] == literal[string] :
identifier[form] = identifier[AnnouncementRequestForm] ( identifier[request] . identifier[POST] , identifier[instance] = identifier[req] )
keyword[if] identifier[form] . identifier[is_valid] ():
identifier[obj] = identifier[form] . identifier[save] ( identifier[commit] = keyword[True] )
identifier[obj] . identifier[content] = identifier[safe_html] ( identifier[obj] . identifier[content] )
identifier[obj] . identifier[save] ()
keyword[if] literal[string] keyword[in] identifier[request] . identifier[POST] :
identifier[obj] . identifier[teachers_approved] . identifier[add] ( identifier[request] . identifier[user] )
identifier[obj] . identifier[save] ()
keyword[if] keyword[not] identifier[obj] . identifier[admin_email_sent] :
keyword[if] identifier[settings] . identifier[SEND_ANNOUNCEMENT_APPROVAL] :
identifier[admin_request_announcement_email] ( identifier[request] , identifier[form] , identifier[obj] )
identifier[obj] . identifier[admin_email_sent] = keyword[True]
identifier[obj] . identifier[save] ()
keyword[return] identifier[redirect] ( literal[string] )
keyword[else] :
identifier[obj] . identifier[save] ()
keyword[return] identifier[redirect] ( literal[string] )
identifier[form] = identifier[AnnouncementRequestForm] ( identifier[instance] = identifier[req] )
identifier[context] ={ literal[string] : identifier[form] , literal[string] : identifier[req] , literal[string] : keyword[False] }
keyword[return] identifier[render] ( identifier[request] , literal[string] , identifier[context] )
|
def approve_announcement_view(request, req_id):
"""The approve announcement page. Teachers will be linked to this page from an email.
req_id: The ID of the AnnouncementRequest
"""
req = get_object_or_404(AnnouncementRequest, id=req_id)
requested_teachers = req.teachers_requested.all()
logger.debug(requested_teachers)
if request.user not in requested_teachers:
messages.error(request, 'You do not have permission to approve this announcement.')
return redirect('index') # depends on [control=['if'], data=[]]
if request.method == 'POST':
form = AnnouncementRequestForm(request.POST, instance=req)
if form.is_valid():
obj = form.save(commit=True)
# SAFE HTML
obj.content = safe_html(obj.content)
obj.save()
if 'approve' in request.POST:
obj.teachers_approved.add(request.user)
obj.save()
if not obj.admin_email_sent:
if settings.SEND_ANNOUNCEMENT_APPROVAL:
admin_request_announcement_email(request, form, obj) # depends on [control=['if'], data=[]]
obj.admin_email_sent = True
obj.save() # depends on [control=['if'], data=[]]
return redirect('approve_announcement_success') # depends on [control=['if'], data=[]]
else:
obj.save()
return redirect('approve_announcement_reject') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
form = AnnouncementRequestForm(instance=req)
context = {'form': form, 'req': req, 'admin_approve': False}
return render(request, 'announcements/approve.html', context)
|
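A hedged sketch of how a view like this is typically wired into a Django urls.py; the route path and name here are hypothetical, not taken from the project:

from django.urls import path

urlpatterns = [
    # Hypothetical route; req_id is passed through to the view.
    path('announcements/approve/<int:req_id>/', approve_announcement_view,
         name='approve_announcement'),
]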
def storagePath(self, location):
"""
Returns the path associated with this application and user for the
given location.
:param location | <QtGui.QDesktopServices.StandardLocation>
:return <str>
"""
default = nativestring(QtGui.QDesktopServices.storageLocation(location))
return self._storagePaths.get(location, default)
|
def function[storagePath, parameter[self, location]]:
constant[
Returns the path associated with this application and user for the
given location.
:param location | <QtGui.QDesktopServices.StandardLocation>
:return <str>
]
variable[default] assign[=] call[name[nativestring], parameter[call[name[QtGui].QDesktopServices.storageLocation, parameter[name[location]]]]]
return[call[name[self]._storagePaths.get, parameter[name[location], name[default]]]]
|
keyword[def] identifier[storagePath] ( identifier[self] , identifier[location] ):
literal[string]
identifier[default] = identifier[nativestring] ( identifier[QtGui] . identifier[QDesktopServices] . identifier[storageLocation] ( identifier[location] ))
keyword[return] identifier[self] . identifier[_storagePaths] . identifier[get] ( identifier[location] , identifier[default] )
|
def storagePath(self, location):
"""
Returns the path associated with this application and user for the
given location.
:param location | <QtGui.QDesktopServices.StandardLocation>
:return <str>
"""
default = nativestring(QtGui.QDesktopServices.storageLocation(location))
return self._storagePaths.get(location, default)
|
def run(self):
"""
This method is the actual implementation of the job. By default, it calls
the target function specified in the #Job constructor.
"""
if self.__target is not None:
return self.__target(self, *self.__args, **self.__kwargs)
raise NotImplementedError
|
def function[run, parameter[self]]:
constant[
This method is the actual implementation of the job. By default, it calls
the target function specified in the #Job constructor.
]
if compare[name[self].__target is_not constant[None]] begin[:]
return[call[name[self].__target, parameter[name[self], <ast.Starred object at 0x7da2046203d0>]]]
<ast.Raise object at 0x7da204623220>
|
keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[__target] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[__target] ( identifier[self] ,* identifier[self] . identifier[__args] ,** identifier[self] . identifier[__kwargs] )
keyword[raise] identifier[NotImplementedError]
|
def run(self):
"""
This method is the actual implementation of the job. By default, it calls
the target function specified in the #Job constructor.
"""
if self.__target is not None:
return self.__target(self, *self.__args, **self.__kwargs) # depends on [control=['if'], data=[]]
raise NotImplementedError
|
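A minimal self-contained sketch of the delegate-or-override pattern, with a simplified stand-in for the Job class:

class Job:
    # Simplified stand-in: run() calls the target if one was given,
    # otherwise demands a subclass override, mirroring the method above.
    def __init__(self, target=None, args=(), kwargs=None):
        self.__target = target
        self.__args = args
        self.__kwargs = kwargs or {}

    def run(self):
        if self.__target is not None:
            return self.__target(self, *self.__args, **self.__kwargs)
        raise NotImplementedError

job = Job(target=lambda self, x: x * 2, args=(21,))
print(job.run())  # 42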
def main() -> None:
"""
Command-line processor. See ``--help`` for details.
"""
main_only_quicksetup_rootlogger(level=logging.DEBUG)
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"input_file",
help="Input PDF (which is not modified by this program)")
parser.add_argument(
"output_file",
help="Output PDF")
parser.add_argument(
"--slice_horiz", type=int, default=1,
help="Slice the input PDF first into this many parts horizontally")
parser.add_argument(
"--slice_vert", type=int, default=1,
help="Slice the input PDF first into this many parts vertically")
parser.add_argument(
"--longedge", action="store_true",
help="Create PDF for long-edge duplex printing, not short edge")
parser.add_argument(
"--overwrite", action="store_true",
help="Allow overwriting of an existing output file")
parser.add_argument(
"--unittest", action="store_true",
help="Run unit tests and exit (you must pass dummy values for "
"input/output files to use these tests)")
# ... because requiring dummy input/output filenames for unit testing
# is less confusing for the majority of users than showing syntax in
# which they are optional!
args = parser.parse_args()
if args.unittest:
log.warning("Performing unit tests")
# unittest.main() doesn't play nicely with argparse; they both
# use sys.argv by default (and we end up with what looks like garbage
# from the argparse help facility); but this works:
unittest.main(argv=[sys.argv[0]])
sys.exit(EXIT_SUCCESS)
success = convert_to_foldable(
input_filename=os.path.abspath(args.input_file),
output_filename=os.path.abspath(args.output_file),
slice_horiz=args.slice_horiz,
slice_vert=args.slice_vert,
overwrite=args.overwrite,
longedge=args.longedge
)
sys.exit(EXIT_SUCCESS if success else EXIT_FAILURE)
|
def function[main, parameter[]]:
constant[
Command-line processor. See ``--help`` for details.
]
call[name[main_only_quicksetup_rootlogger], parameter[]]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[input_file]]]
call[name[parser].add_argument, parameter[constant[output_file]]]
call[name[parser].add_argument, parameter[constant[--slice_horiz]]]
call[name[parser].add_argument, parameter[constant[--slice_vert]]]
call[name[parser].add_argument, parameter[constant[--longedge]]]
call[name[parser].add_argument, parameter[constant[--overwrite]]]
call[name[parser].add_argument, parameter[constant[--unittest]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[]]
if name[args].unittest begin[:]
call[name[log].warning, parameter[constant[Performing unit tests]]]
call[name[unittest].main, parameter[]]
call[name[sys].exit, parameter[name[EXIT_SUCCESS]]]
variable[success] assign[=] call[name[convert_to_foldable], parameter[]]
call[name[sys].exit, parameter[<ast.IfExp object at 0x7da1b184b370>]]
|
keyword[def] identifier[main] ()-> keyword[None] :
literal[string]
identifier[main_only_quicksetup_rootlogger] ( identifier[level] = identifier[logging] . identifier[DEBUG] )
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] (
identifier[formatter_class] = identifier[argparse] . identifier[ArgumentDefaultsHelpFormatter]
)
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[args] = identifier[parser] . identifier[parse_args] ()
keyword[if] identifier[args] . identifier[unittest] :
identifier[log] . identifier[warning] ( literal[string] )
identifier[unittest] . identifier[main] ( identifier[argv] =[ identifier[sys] . identifier[argv] [ literal[int] ]])
identifier[sys] . identifier[exit] ( identifier[EXIT_SUCCESS] )
identifier[success] = identifier[convert_to_foldable] (
identifier[input_filename] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[args] . identifier[input_file] ),
identifier[output_filename] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[args] . identifier[output_file] ),
identifier[slice_horiz] = identifier[args] . identifier[slice_horiz] ,
identifier[slice_vert] = identifier[args] . identifier[slice_vert] ,
identifier[overwrite] = identifier[args] . identifier[overwrite] ,
identifier[longedge] = identifier[args] . identifier[longedge]
)
identifier[sys] . identifier[exit] ( identifier[EXIT_SUCCESS] keyword[if] identifier[success] keyword[else] identifier[EXIT_FAILURE] )
|
def main() -> None:
"""
Command-line processor. See ``--help`` for details.
"""
main_only_quicksetup_rootlogger(level=logging.DEBUG)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input_file', help='Input PDF (which is not modified by this program)')
parser.add_argument('output_file', help='Output PDF')
parser.add_argument('--slice_horiz', type=int, default=1, help='Slice the input PDF first into this many parts horizontally')
parser.add_argument('--slice_vert', type=int, default=1, help='Slice the input PDF first into this many parts vertically')
parser.add_argument('--longedge', action='store_true', help='Create PDF for long-edge duplex printing, not short edge')
parser.add_argument('--overwrite', action='store_true', help='Allow overwriting of an existing output file')
parser.add_argument('--unittest', action='store_true', help='Run unit tests and exit (you must pass dummy values for input/output files to use these tests)')
# ... because requiring dummy input/output filenames for unit testing
# is less confusing for the majority of users than showing syntax in
# which they are optional!
args = parser.parse_args()
if args.unittest:
log.warning('Performing unit tests')
# unittest.main() doesn't play nicely with argparse; they both
# use sys.argv by default (and we end up with what looks like garbage
# from the argparse help facility); but this works:
unittest.main(argv=[sys.argv[0]])
sys.exit(EXIT_SUCCESS) # depends on [control=['if'], data=[]]
success = convert_to_foldable(input_filename=os.path.abspath(args.input_file), output_filename=os.path.abspath(args.output_file), slice_horiz=args.slice_horiz, slice_vert=args.slice_vert, overwrite=args.overwrite, longedge=args.longedge)
sys.exit(EXIT_SUCCESS if success else EXIT_FAILURE)
|
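The argv=[sys.argv[0]] trick above is worth isolating: it stops unittest.main() from trying to parse the wrapper script's own command-line flags. A self-contained sketch:

import sys
import unittest

class SmokeTest(unittest.TestCase):
    def test_truth(self):
        self.assertTrue(True)

if __name__ == '__main__':
    # Without argv=..., unittest would reparse sys.argv and choke on
    # any flags meant for the surrounding argparse-based CLI.
    unittest.main(argv=[sys.argv[0]])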
def clear_mode(self, mode, value=None):
"""Clear mode on the channel.
Arguments:
mode -- The mode (a single-character string).
value -- Value
"""
try:
if mode in self.user_modes:
del self.mode_users[mode][value]
else:
del self.modes[mode]
except KeyError:
pass
|
def function[clear_mode, parameter[self, mode, value]]:
constant[Clear mode on the channel.
Arguments:
mode -- The mode (a single-character string).
value -- Value
]
<ast.Try object at 0x7da2049607c0>
|
keyword[def] identifier[clear_mode] ( identifier[self] , identifier[mode] , identifier[value] = keyword[None] ):
literal[string]
keyword[try] :
keyword[if] identifier[mode] keyword[in] identifier[self] . identifier[user_modes] :
keyword[del] identifier[self] . identifier[mode_users] [ identifier[mode] ][ identifier[value] ]
keyword[else] :
keyword[del] identifier[self] . identifier[modes] [ identifier[mode] ]
keyword[except] identifier[KeyError] :
keyword[pass]
|
def clear_mode(self, mode, value=None):
"""Clear mode on the channel.
Arguments:
mode -- The mode (a single-character string).
value -- Value
"""
try:
if mode in self.user_modes:
del self.mode_users[mode][value] # depends on [control=['if'], data=['mode']]
else:
del self.modes[mode] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
|
def get_scan_results_xml(self, scan_id, pop_res):
""" Gets scan_id scan's results in XML format.
        @return: Element with the scan results in XML format.
"""
results = Element('results')
for result in self.scan_collection.results_iterator(scan_id, pop_res):
results.append(get_result_xml(result))
logger.info('Returning %d results', len(results))
return results
|
def function[get_scan_results_xml, parameter[self, scan_id, pop_res]]:
constant[ Get the results of scan scan_id in XML format.
@return: XML element containing the scan results.
]
variable[results] assign[=] call[name[Element], parameter[constant[results]]]
for taget[name[result]] in starred[call[name[self].scan_collection.results_iterator, parameter[name[scan_id], name[pop_res]]]] begin[:]
call[name[results].append, parameter[call[name[get_result_xml], parameter[name[result]]]]]
call[name[logger].info, parameter[constant[Returning %d results], call[name[len], parameter[name[results]]]]]
return[name[results]]
|
keyword[def] identifier[get_scan_results_xml] ( identifier[self] , identifier[scan_id] , identifier[pop_res] ):
literal[string]
identifier[results] = identifier[Element] ( literal[string] )
keyword[for] identifier[result] keyword[in] identifier[self] . identifier[scan_collection] . identifier[results_iterator] ( identifier[scan_id] , identifier[pop_res] ):
identifier[results] . identifier[append] ( identifier[get_result_xml] ( identifier[result] ))
identifier[logger] . identifier[info] ( literal[string] , identifier[len] ( identifier[results] ))
keyword[return] identifier[results]
|
def get_scan_results_xml(self, scan_id, pop_res):
""" Gets scan_id scan's results in XML format.
@return: String of scan results in xml.
"""
results = Element('results')
for result in self.scan_collection.results_iterator(scan_id, pop_res):
results.append(get_result_xml(result)) # depends on [control=['for'], data=['result']]
logger.info('Returning %d results', len(results))
return results
|
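
The XML shape produced above, sketched with the standard library. This assumes Element is xml.etree.ElementTree.Element and that get_result_xml() returns one <result> element per finding; both are assumptions about the surrounding module.

from xml.etree.ElementTree import Element, SubElement, tostring

results = Element('results')
for name in ('finding-1', 'finding-2'):
    SubElement(results, 'result', name=name)
print(tostring(results).decode())
# <results><result name="finding-1" /><result name="finding-2" /></results>
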
def parse_fn(fn):
""" This parses the file name and returns the coordinates of the tile
Parameters
-----------
fn : str
Filename of a GEOTIFF
Returns
--------
coords = [LLC.lat, LLC.lon, URC.lat, URC.lon]
"""
try:
parts = os.path.splitext(os.path.split(fn)[-1])[0].replace('o', '.')\
.split('_')[:2]
coords = [float(crds)
for crds in re.split('[NSEW]', parts[0] + parts[1])[1:]]
except:
coords = [np.nan] * 4
return coords
|
def function[parse_fn, parameter[fn]]:
constant[ This parses the file name and returns the coordinates of the tile
Parameters
-----------
fn : str
Filename of a GEOTIFF
Returns
--------
coords = [LLC.lat, LLC.lon, URC.lat, URC.lon]
]
<ast.Try object at 0x7da2041db5e0>
return[name[coords]]
|
keyword[def] identifier[parse_fn] ( identifier[fn] ):
literal[string]
keyword[try] :
identifier[parts] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[split] ( identifier[fn] )[- literal[int] ])[ literal[int] ]. identifier[replace] ( literal[string] , literal[string] ). identifier[split] ( literal[string] )[: literal[int] ]
identifier[coords] =[ identifier[float] ( identifier[crds] )
keyword[for] identifier[crds] keyword[in] identifier[re] . identifier[split] ( literal[string] , identifier[parts] [ literal[int] ]+ identifier[parts] [ literal[int] ])[ literal[int] :]]
keyword[except] :
identifier[coords] =[ identifier[np] . identifier[nan] ]* literal[int]
keyword[return] identifier[coords]
|
def parse_fn(fn):
""" This parses the file name and returns the coordinates of the tile
Parameters
-----------
fn : str
Filename of a GEOTIFF
Returns
--------
coords = [LLC.lat, LLC.lon, URC.lat, URC.lon]
"""
try:
parts = os.path.splitext(os.path.split(fn)[-1])[0].replace('o', '.').split('_')[:2]
coords = [float(crds) for crds in re.split('[NSEW]', parts[0] + parts[1])[1:]] # depends on [control=['try'], data=[]]
except:
coords = [np.nan] * 4 # depends on [control=['except'], data=[]]
return coords
|
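
Worked example with an invented tile name that matches what the regex expects: four [NSEW]-prefixed coordinates in the first two underscore-separated tokens, with 'o' standing in for the decimal point. The parsing steps are reproduced inline so the snippet is self-contained.

import os
import re

fn = '/dem/N35o00W106o50_N36o00W106o00_1arc.tif'
parts = os.path.splitext(os.path.split(fn)[-1])[0].replace('o', '.').split('_')[:2]
coords = [float(c) for c in re.split('[NSEW]', parts[0] + parts[1])[1:]]
print(coords)  # [35.0, 106.5, 36.0, 106.0] -> [LLC.lat, LLC.lon, URC.lat, URC.lon]
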
def html_page(title="Page Title", body=""):
"""Create HTML page as string."""
html = "<html>\n<head><title>%s</title></head>\n<body>\n" % (title)
html += "<h1>%s</h1>\n" % (title)
html += body
html += "</body>\n</html>\n"
return html
|
def function[html_page, parameter[title, body]]:
constant[Create HTML page as string.]
variable[html] assign[=] binary_operation[constant[<html>
<head><title>%s</title></head>
<body>
] <ast.Mod object at 0x7da2590d6920> name[title]]
<ast.AugAssign object at 0x7da20c76c1c0>
<ast.AugAssign object at 0x7da20c76d420>
<ast.AugAssign object at 0x7da20c76c250>
return[name[html]]
|
keyword[def] identifier[html_page] ( identifier[title] = literal[string] , identifier[body] = literal[string] ):
literal[string]
identifier[html] = literal[string] %( identifier[title] )
identifier[html] += literal[string] %( identifier[title] )
identifier[html] += identifier[body]
identifier[html] += literal[string]
keyword[return] identifier[html]
|
def html_page(title='Page Title', body=''):
"""Create HTML page as string."""
html = '<html>\n<head><title>%s</title></head>\n<body>\n' % title
html += '<h1>%s</h1>\n' % title
html += body
html += '</body>\n</html>\n'
return html
|
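
Usage, assuming the html_page() function above is in scope:

page = html_page('Status', '<p>All systems nominal.</p>')
print(page)
# <html>
# <head><title>Status</title></head>
# <body>
# <h1>Status</h1>
# <p>All systems nominal.</p></body>
# </html>
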
def _init_log(level=logging.DEBUG):
"""Initialise the logging object.
Args:
level (int): Logging level.
Returns:
Logger: Python logging object.
"""
log = logging.getLogger(__file__)
log.setLevel(level)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
formatter = logging.Formatter('%(asctime)s: %(message)s',
'%Y/%m/%d-%H:%M:%S')
handler.setFormatter(formatter)
log.addHandler(handler)
return log
|
def function[_init_log, parameter[level]]:
constant[Initialise the logging object.
Args:
level (int): Logging level.
Returns:
Logger: Python logging object.
]
variable[log] assign[=] call[name[logging].getLogger, parameter[name[__file__]]]
call[name[log].setLevel, parameter[name[level]]]
variable[handler] assign[=] call[name[logging].StreamHandler, parameter[name[sys].stdout]]
call[name[handler].setLevel, parameter[name[level]]]
variable[formatter] assign[=] call[name[logging].Formatter, parameter[constant[%(asctime)s: %(message)s], constant[%Y/%m/%d-%H:%M:%S]]]
call[name[handler].setFormatter, parameter[name[formatter]]]
call[name[log].addHandler, parameter[name[handler]]]
return[name[log]]
|
keyword[def] identifier[_init_log] ( identifier[level] = identifier[logging] . identifier[DEBUG] ):
literal[string]
identifier[log] = identifier[logging] . identifier[getLogger] ( identifier[__file__] )
identifier[log] . identifier[setLevel] ( identifier[level] )
identifier[handler] = identifier[logging] . identifier[StreamHandler] ( identifier[sys] . identifier[stdout] )
identifier[handler] . identifier[setLevel] ( identifier[level] )
identifier[formatter] = identifier[logging] . identifier[Formatter] ( literal[string] ,
literal[string] )
identifier[handler] . identifier[setFormatter] ( identifier[formatter] )
identifier[log] . identifier[addHandler] ( identifier[handler] )
keyword[return] identifier[log]
|
def _init_log(level=logging.DEBUG):
"""Initialise the logging object.
Args:
level (int): Logging level.
Returns:
Logger: Python logging object.
"""
log = logging.getLogger(__file__)
log.setLevel(level)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
formatter = logging.Formatter('%(asctime)s: %(message)s', '%Y/%m/%d-%H:%M:%S')
handler.setFormatter(formatter)
log.addHandler(handler)
return log
|
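
Usage sketch, assuming _init_log() above is in scope; the timestamp shown is invented, and messages below the configured level are filtered out by both the logger and the handler.

import logging

log = _init_log(level=logging.INFO)
log.info('service started')        # e.g. 2024/01/01-12:00:00: service started
log.debug('suppressed at INFO')    # below the configured level, not printed
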
def listRoles(self, *args, **kwargs):
"""
List Roles
Get a list of all roles, each role object also includes the list of
scopes it expands to.
This method gives output: ``v1/list-roles-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listRoles"], *args, **kwargs)
|
def function[listRoles, parameter[self]]:
constant[
List Roles
Get a list of all roles, each role object also includes the list of
scopes it expands to.
This method gives output: ``v1/list-roles-response.json#``
This method is ``stable``
]
return[call[name[self]._makeApiCall, parameter[call[name[self].funcinfo][constant[listRoles]], <ast.Starred object at 0x7da18eb54640>]]]
|
keyword[def] identifier[listRoles] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[_makeApiCall] ( identifier[self] . identifier[funcinfo] [ literal[string] ],* identifier[args] ,** identifier[kwargs] )
|
def listRoles(self, *args, **kwargs):
"""
List Roles
Get a list of all roles, each role object also includes the list of
scopes it expands to.
This method gives output: ``v1/list-roles-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo['listRoles'], *args, **kwargs)
|
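
The dispatch pattern above (a funcinfo table plus a generic _makeApiCall) in a fully mocked, self-contained form; the route metadata and role payload are invented for illustration.

class MockAuthClient:
    funcinfo = {'listRoles': {'method': 'get', 'route': '/roles'}}

    def _makeApiCall(self, entry, *args, **kwargs):
        # A real client would issue the HTTP request described by 'entry'.
        return [{'roleId': 'repo:demo/*', 'expandedScopes': ['queue:create-task']}]

    def listRoles(self, *args, **kwargs):
        return self._makeApiCall(self.funcinfo['listRoles'], *args, **kwargs)

for role in MockAuthClient().listRoles():
    print(role['roleId'], role['expandedScopes'])
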
def get_icon_for(self, brain_or_object):
"""Get the navigation portlet icon for the brain or object
The cache key ensures that the lookup is done only once per domain name
"""
portal_types = api.get_tool("portal_types")
fti = portal_types.getTypeInfo(api.get_portal_type(brain_or_object))
icon = fti.getIcon()
if not icon:
return ""
# Always try to get the big icon for high-res displays
icon_big = icon.replace(".png", "_big.png")
# fall back to a default icon if the looked up icon does not exist
if self.context.restrictedTraverse(icon_big, None) is None:
icon_big = None
portal_url = api.get_url(api.get_portal())
title = api.get_title(brain_or_object)
html_tag = "<img title='{}' src='{}/{}' width='16' />".format(
title, portal_url, icon_big or icon)
logger.info("Generated Icon Tag for {}: {}".format(
api.get_path(brain_or_object), html_tag))
return html_tag
|
def function[get_icon_for, parameter[self, brain_or_object]]:
constant[Get the navigation portlet icon for the brain or object
The cache key ensures that the lookup is done only once per domain name
]
variable[portal_types] assign[=] call[name[api].get_tool, parameter[constant[portal_types]]]
variable[fti] assign[=] call[name[portal_types].getTypeInfo, parameter[call[name[api].get_portal_type, parameter[name[brain_or_object]]]]]
variable[icon] assign[=] call[name[fti].getIcon, parameter[]]
if <ast.UnaryOp object at 0x7da1b07b97b0> begin[:]
return[constant[]]
variable[icon_big] assign[=] call[name[icon].replace, parameter[constant[.png], constant[_big.png]]]
if compare[call[name[self].context.restrictedTraverse, parameter[name[icon_big], constant[None]]] is constant[None]] begin[:]
variable[icon_big] assign[=] constant[None]
variable[portal_url] assign[=] call[name[api].get_url, parameter[call[name[api].get_portal, parameter[]]]]
variable[title] assign[=] call[name[api].get_title, parameter[name[brain_or_object]]]
variable[html_tag] assign[=] call[constant[<img title='{}' src='{}/{}' width='16' />].format, parameter[name[title], name[portal_url], <ast.BoolOp object at 0x7da1b07bbca0>]]
call[name[logger].info, parameter[call[constant[Generated Icon Tag for {}: {}].format, parameter[call[name[api].get_path, parameter[name[brain_or_object]]], name[html_tag]]]]]
return[name[html_tag]]
|
keyword[def] identifier[get_icon_for] ( identifier[self] , identifier[brain_or_object] ):
literal[string]
identifier[portal_types] = identifier[api] . identifier[get_tool] ( literal[string] )
identifier[fti] = identifier[portal_types] . identifier[getTypeInfo] ( identifier[api] . identifier[get_portal_type] ( identifier[brain_or_object] ))
identifier[icon] = identifier[fti] . identifier[getIcon] ()
keyword[if] keyword[not] identifier[icon] :
keyword[return] literal[string]
identifier[icon_big] = identifier[icon] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[self] . identifier[context] . identifier[restrictedTraverse] ( identifier[icon_big] , keyword[None] ) keyword[is] keyword[None] :
identifier[icon_big] = keyword[None]
identifier[portal_url] = identifier[api] . identifier[get_url] ( identifier[api] . identifier[get_portal] ())
identifier[title] = identifier[api] . identifier[get_title] ( identifier[brain_or_object] )
identifier[html_tag] = literal[string] . identifier[format] (
identifier[title] , identifier[portal_url] , identifier[icon_big] keyword[or] identifier[icon] )
identifier[logger] . identifier[info] ( literal[string] . identifier[format] (
identifier[api] . identifier[get_path] ( identifier[brain_or_object] ), identifier[html_tag] ))
keyword[return] identifier[html_tag]
|
def get_icon_for(self, brain_or_object):
"""Get the navigation portlet icon for the brain or object
The cache key ensures that the lookup is done only once per domain name
"""
portal_types = api.get_tool('portal_types')
fti = portal_types.getTypeInfo(api.get_portal_type(brain_or_object))
icon = fti.getIcon()
if not icon:
return '' # depends on [control=['if'], data=[]]
# Always try to get the big icon for high-res displays
icon_big = icon.replace('.png', '_big.png')
# fall back to a default icon if the looked up icon does not exist
if self.context.restrictedTraverse(icon_big, None) is None:
icon_big = None # depends on [control=['if'], data=[]]
portal_url = api.get_url(api.get_portal())
title = api.get_title(brain_or_object)
html_tag = "<img title='{}' src='{}/{}' width='16' />".format(title, portal_url, icon_big or icon)
logger.info('Generated Icon Tag for {}: {}'.format(api.get_path(brain_or_object), html_tag))
return html_tag
|
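
Only the final string-formatting step of get_icon_for(), isolated with made-up values; the portal_types/fti lookups above need a running Plone site and are not reproduced here.

title = 'Clients'
portal_url = 'https://lims.example.com'
icon_big = 'client_big.png'   # pretend the _big variant exists
icon = 'client.png'
html_tag = "<img title='{}' src='{}/{}' width='16' />".format(
    title, portal_url, icon_big or icon)
print(html_tag)
# <img title='Clients' src='https://lims.example.com/client_big.png' width='16' />
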
def clinsig_human(variant_obj):
"""Convert to human readable version of CLINSIG evaluation."""
for clinsig_obj in variant_obj['clnsig']:
# The clinsig objects always have an accession
if isinstance(clinsig_obj['accession'], int):
# New version
link = "https://www.ncbi.nlm.nih.gov/clinvar/variation/{}"
else:
# Old version
link = "https://www.ncbi.nlm.nih.gov/clinvar/{}"
human_str = 'not provided'
if clinsig_obj.get('value'):
try:
# Old version
int(clinsig_obj['value'])
human_str = CLINSIG_MAP.get(clinsig_obj['value'], 'not provided')
except ValueError:
# New version
human_str = clinsig_obj['value']
clinsig_obj['human'] = human_str
clinsig_obj['link'] = link.format(clinsig_obj['accession'])
yield clinsig_obj
|
def function[clinsig_human, parameter[variant_obj]]:
constant[Convert to human readable version of CLINSIG evaluation.]
for taget[name[clinsig_obj]] in starred[call[name[variant_obj]][constant[clnsig]]] begin[:]
if call[name[isinstance], parameter[call[name[clinsig_obj]][constant[accession]], name[int]]] begin[:]
variable[link] assign[=] constant[https://www.ncbi.nlm.nih.gov/clinvar/variation/{}]
variable[human_str] assign[=] constant[not provided]
if call[name[clinsig_obj].get, parameter[constant[value]]] begin[:]
<ast.Try object at 0x7da20e955f30>
call[name[clinsig_obj]][constant[human]] assign[=] name[human_str]
call[name[clinsig_obj]][constant[link]] assign[=] call[name[link].format, parameter[call[name[clinsig_obj]][constant[accession]]]]
<ast.Yield object at 0x7da20e956f50>
|
keyword[def] identifier[clinsig_human] ( identifier[variant_obj] ):
literal[string]
keyword[for] identifier[clinsig_obj] keyword[in] identifier[variant_obj] [ literal[string] ]:
keyword[if] identifier[isinstance] ( identifier[clinsig_obj] [ literal[string] ], identifier[int] ):
identifier[link] = literal[string]
keyword[else] :
identifier[link] = literal[string]
identifier[human_str] = literal[string]
keyword[if] identifier[clinsig_obj] . identifier[get] ( literal[string] ):
keyword[try] :
identifier[int] ( identifier[clinsig_obj] [ literal[string] ])
identifier[human_str] = identifier[CLINSIG_MAP] . identifier[get] ( identifier[clinsig_obj] [ literal[string] ], literal[string] )
keyword[except] identifier[ValueError] :
identifier[human_str] = identifier[clinsig_obj] [ literal[string] ]
identifier[clinsig_obj] [ literal[string] ]= identifier[human_str]
identifier[clinsig_obj] [ literal[string] ]= identifier[link] . identifier[format] ( identifier[clinsig_obj] [ literal[string] ])
keyword[yield] identifier[clinsig_obj]
|
def clinsig_human(variant_obj):
"""Convert to human readable version of CLINSIG evaluation."""
for clinsig_obj in variant_obj['clnsig']:
# The clinsig objects always have an accession
if isinstance(clinsig_obj['accession'], int):
# New version
link = 'https://www.ncbi.nlm.nih.gov/clinvar/variation/{}' # depends on [control=['if'], data=[]]
else:
# Old version
link = 'https://www.ncbi.nlm.nih.gov/clinvar/{}'
human_str = 'not provided'
if clinsig_obj.get('value'):
try:
# Old version
int(clinsig_obj['value'])
human_str = CLINSIG_MAP.get(clinsig_obj['value'], 'not provided') # depends on [control=['try'], data=[]]
except ValueError:
# New version
human_str = clinsig_obj['value'] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
clinsig_obj['human'] = human_str
clinsig_obj['link'] = link.format(clinsig_obj['accession'])
yield clinsig_obj # depends on [control=['for'], data=['clinsig_obj']]
|
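
A hedged driver for clinsig_human(), assuming the function above is in scope. CLINSIG_MAP below is a one-entry stub standing in for the module-level mapping (its key type is an assumption), and the variant dict shows one old-style entry (numeric value, string accession) and one new-style entry (integer accession).

CLINSIG_MAP = {5: 'pathogenic'}  # stub; the real map lives in the module

variant = {'clnsig': [
    {'accession': 'RCV000000001', 'value': 5},        # old style
    {'accession': 123456, 'value': 'likely_benign'},  # new style
]}
for obj in clinsig_human(variant):
    print(obj['human'], obj['link'])
# pathogenic https://www.ncbi.nlm.nih.gov/clinvar/RCV000000001
# likely_benign https://www.ncbi.nlm.nih.gov/clinvar/variation/123456
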
def reset(self):
"""Reset the service to its' initial state."""
logger.debug('StackInABoxService ({0}): Reset'
.format(self.__id, self.name))
self.base_url = '/{0}'.format(self.name)
logger.debug('StackInABoxService ({0}): Hosting Service {1}'
.format(self.__id, self.name))
|
def function[reset, parameter[self]]:
constant[Reset the service to its initial state.]
call[name[logger].debug, parameter[call[constant[StackInABoxService ({0}): Reset].format, parameter[name[self].__id, name[self].name]]]]
name[self].base_url assign[=] call[constant[/{0}].format, parameter[name[self].name]]
call[name[logger].debug, parameter[call[constant[StackInABoxService ({0}): Hosting Service {1}].format, parameter[name[self].__id, name[self].name]]]]
|
keyword[def] identifier[reset] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string]
. identifier[format] ( identifier[self] . identifier[__id] , identifier[self] . identifier[name] ))
identifier[self] . identifier[base_url] = literal[string] . identifier[format] ( identifier[self] . identifier[name] )
identifier[logger] . identifier[debug] ( literal[string]
. identifier[format] ( identifier[self] . identifier[__id] , identifier[self] . identifier[name] ))
|
def reset(self):
"""Reset the service to its' initial state."""
logger.debug('StackInABoxService ({0}): Reset'.format(self.__id, self.name))
self.base_url = '/{0}'.format(self.name)
logger.debug('StackInABoxService ({0}): Hosting Service {1}'.format(self.__id, self.name))
|
def invoke_hook_emit(self, values, stream_id, out_tasks):
"""invoke task hooks for every time a tuple is emitted in spout/bolt
:type values: list
:param values: values emitted
:type stream_id: str
:param stream_id: stream id into which tuple is emitted
:type out_tasks: list
:param out_tasks: list of custom grouping target task id
"""
if len(self.task_hooks) > 0:
emit_info = EmitInfo(values=values, stream_id=stream_id,
task_id=self.get_task_id(), out_tasks=out_tasks)
for task_hook in self.task_hooks:
task_hook.emit(emit_info)
|
def function[invoke_hook_emit, parameter[self, values, stream_id, out_tasks]]:
constant[Invoke task hooks every time a tuple is emitted in spout/bolt
:type values: list
:param values: values emitted
:type stream_id: str
:param stream_id: stream id into which tuple is emitted
:type out_tasks: list
:param out_tasks: list of custom grouping target task id
]
if compare[call[name[len], parameter[name[self].task_hooks]] greater[>] constant[0]] begin[:]
variable[emit_info] assign[=] call[name[EmitInfo], parameter[]]
for taget[name[task_hook]] in starred[name[self].task_hooks] begin[:]
call[name[task_hook].emit, parameter[name[emit_info]]]
|
keyword[def] identifier[invoke_hook_emit] ( identifier[self] , identifier[values] , identifier[stream_id] , identifier[out_tasks] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[task_hooks] )> literal[int] :
identifier[emit_info] = identifier[EmitInfo] ( identifier[values] = identifier[values] , identifier[stream_id] = identifier[stream_id] ,
identifier[task_id] = identifier[self] . identifier[get_task_id] (), identifier[out_tasks] = identifier[out_tasks] )
keyword[for] identifier[task_hook] keyword[in] identifier[self] . identifier[task_hooks] :
identifier[task_hook] . identifier[emit] ( identifier[emit_info] )
|
def invoke_hook_emit(self, values, stream_id, out_tasks):
"""invoke task hooks for every time a tuple is emitted in spout/bolt
:type values: list
:param values: values emitted
:type stream_id: str
:param stream_id: stream id into which tuple is emitted
:type out_tasks: list
:param out_tasks: list of custom grouping target task id
"""
if len(self.task_hooks) > 0:
emit_info = EmitInfo(values=values, stream_id=stream_id, task_id=self.get_task_id(), out_tasks=out_tasks)
for task_hook in self.task_hooks:
task_hook.emit(emit_info) # depends on [control=['for'], data=['task_hook']] # depends on [control=['if'], data=[]]
|
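
Mocked end-to-end sketch of the hook protocol assumed above. EmitInfo is replaced by a namedtuple stand-in, the component carries a single printing hook, and everything is assumed to live in one module so the function's EmitInfo reference resolves to the stand-in.

from collections import namedtuple

EmitInfo = namedtuple('EmitInfo', 'values stream_id task_id out_tasks')  # stand-in

class PrintHook:
    def emit(self, info):
        print('tuple', info.values, 'on', info.stream_id, '->', info.out_tasks)

class MockComponent:
    task_hooks = [PrintHook()]

    def get_task_id(self):
        return 7

MockComponent.invoke_hook_emit = invoke_hook_emit  # reuse the method above

MockComponent().invoke_hook_emit([1, 'a'], 'default', out_tasks=[3, 4])
# tuple [1, 'a'] on default -> [3, 4]
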
def get_projects(limit=100, offset=0, last_id=None):
"""Return a list of registered projects.
:param limit: Number of returned items, default 100
:type limit: integer
:param offset: Offset for the query, default 0
:type offset: integer
:param last_id: id of the last project, used for pagination. If provided, offset is ignored
:type last_id: integer
:rtype: list
:returns: A list of PYBOSSA Projects
"""
if last_id is not None:
params = dict(limit=limit, last_id=last_id)
else:
print(OFFSET_WARNING)
params = dict(limit=limit, offset=offset)
try:
res = _pybossa_req('get', 'project',
params=params)
if type(res).__name__ == 'list':
return [Project(project) for project in res]
else:
raise TypeError
except: # pragma: no cover
raise
|
def function[get_projects, parameter[limit, offset, last_id]]:
constant[Return a list of registered projects.
:param limit: Number of returned items, default 100
:type limit: integer
:param offset: Offset for the query, default 0
:type offset: integer
:param last_id: id of the last project, used for pagination. If provided, offset is ignored
:type last_id: integer
:rtype: list
:returns: A list of PYBOSSA Projects
]
if compare[name[last_id] is_not constant[None]] begin[:]
variable[params] assign[=] call[name[dict], parameter[]]
<ast.Try object at 0x7da18f810ee0>
|
keyword[def] identifier[get_projects] ( identifier[limit] = literal[int] , identifier[offset] = literal[int] , identifier[last_id] = keyword[None] ):
literal[string]
keyword[if] identifier[last_id] keyword[is] keyword[not] keyword[None] :
identifier[params] = identifier[dict] ( identifier[limit] = identifier[limit] , identifier[last_id] = identifier[last_id] )
keyword[else] :
identifier[print] ( identifier[OFFSET_WARNING] )
identifier[params] = identifier[dict] ( identifier[limit] = identifier[limit] , identifier[offset] = identifier[offset] )
keyword[try] :
identifier[res] = identifier[_pybossa_req] ( literal[string] , literal[string] ,
identifier[params] = identifier[params] )
keyword[if] identifier[type] ( identifier[res] ). identifier[__name__] == literal[string] :
keyword[return] [ identifier[Project] ( identifier[project] ) keyword[for] identifier[project] keyword[in] identifier[res] ]
keyword[else] :
keyword[raise] identifier[TypeError]
keyword[except] :
keyword[raise]
|
def get_projects(limit=100, offset=0, last_id=None):
"""Return a list of registered projects.
:param limit: Number of returned items, default 100
:type limit: integer
:param offset: Offset for the query, default 0
:type offset: integer
:param last_id: id of the last project, used for pagination. If provided, offset is ignored
:type last_id: integer
:rtype: list
:returns: A list of PYBOSSA Projects
"""
if last_id is not None:
params = dict(limit=limit, last_id=last_id) # depends on [control=['if'], data=['last_id']]
else:
print(OFFSET_WARNING)
params = dict(limit=limit, offset=offset)
try:
res = _pybossa_req('get', 'project', params=params)
if type(res).__name__ == 'list':
return [Project(project) for project in res] # depends on [control=['if'], data=[]]
else:
raise TypeError # depends on [control=['try'], data=[]]
except: # pragma: no cover
raise # depends on [control=['except'], data=[]]
|
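
A hedged pagination loop built on get_projects(): keyset pagination via last_id, starting from 0 (an assumption about the API's semantics) and assuming each Project exposes its id as .id. Passing last_id avoids the OFFSET_WARNING path entirely.

projects, last_id = [], 0
while True:
    page = get_projects(limit=100, last_id=last_id)
    if not page:
        break
    projects.extend(page)
    last_id = page[-1].id  # continue after the newest id seen
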
def _adjust_merged_values_orm(env, model_name, record_ids, target_record_id,
field_spec):
"""This method deals with the values on the records to be merged +
the target record, performing operations that make sense given the semantics
of the model.
:param field_spec: Dictionary with field names as keys and forced operation
to perform as values. If a field is not present here, default operation
will be performed.
Possible operations by field types:
* Char, Text and Html fields:
- 'merge' (default for Text and Html): content is concatenated
with an ' | ' as separator
- other value (default for Char): content on target record is preserved
* Integer, Float and Monetary fields:
- 'sum' (default for Float and Monetary): Sum all the values of
the records.
- 'avg': Perform the arithmetic average of the values of the records.
- 'max': Put the maximum of all the values.
- 'min': Put the minimum of all the values.
- other value (default for Integer): content on target record
is preserved
* Binary field:
- 'merge' (default): apply first not null value of the records if
value of target record is null, preserve target value otherwise.
- other value: content on target record is preserved
* Boolean field:
- 'and': Perform a logical AND over all values.
- 'or': Perform a logical OR over all values.
- other value (default): content on target record is preserved
* Date and Datetime fields:
- 'max': Put the maximum of all the values.
- 'min': Put the minimum of all the values.
- other value (default): content on target record is preserved
* Many2one fields:
- 'merge' (default): apply first not null value of the records if
value of target record is null, preserve target value otherwise.
- other value: content on target record is preserved
* Many2many fields:
- 'merge' (default): combine all the values
- other value: content on target record is preserved
* One2many fields:
- 'merge' (default): combine all the values
- other value: content on target record is preserved
* Reference fields:
- any value: content on target record is preserved
* Selection fields:
- any value: content on target record is preserved
"""
model = env[model_name]
fields = model._fields.values()
all_records = model.browse(tuple(record_ids) + (target_record_id, ))
target_record = model.browse(target_record_id)
vals = {}
o2m_changes = 0
for field in fields:
if not field.store or field.compute or field.related:
continue # don't do anything on these cases
op = field_spec.get(field.name, False)
l = all_records.mapped(field.name)
if field.type in ('char', 'text', 'html'):
if not op:
op = 'other' if field.type == 'char' else 'merge'
if op == 'merge':
l = filter(lambda x: x is not False, l)
vals[field.name] = ' | '.join(l)
elif field.type in ('integer', 'float', 'monetary'):
if not op:
op = 'other' if field.type == 'integer' else 'sum'
if op == 'sum':
vals[field.name] = sum(l)
elif op == 'avg':
vals[field.name] = sum(l) / len(l)
elif op == 'max':
vals[field.name] = max(l)
elif op == 'min':
vals[field.name] = min(l)
elif field.type == 'boolean':
op = op or 'other'
if op == 'and':
vals[field.name] = functools.reduce(lambda x, y: x & y, l)
elif op == 'or':
vals[field.name] = functools.reduce(lambda x, y: x | y, l)
elif field.type in ('date', 'datetime'):
if op:
l = filter(lambda x: x is not False, l)
op = op or 'other'
if op == 'max':
vals[field.name] = max(l)
elif op == 'min':
vals[field.name] = min(l)
elif field.type == 'many2many':
op = op or 'merge'
if op == 'merge':
l = filter(lambda x: x is not False, l)
vals[field.name] = [(4, x.id) for x in l]
elif field.type == 'one2many':
op = op or 'merge'
if op == 'merge':
o2m_changes += 1
l.write({field.inverse_name: target_record_id})
elif field.type == 'binary':
op = op or 'merge'
if op == 'merge':
l = [x for x in l if x]
if not getattr(target_record, field.name) and l:
vals[field.name] = l[0]
elif field.type == 'many2one':
op = op or 'merge'
if op == 'merge':
if not getattr(target_record, field.name) and l:
vals[field.name] = l[0]
# Curate values that haven't changed
new_vals = {}
for f in vals:
if model._fields[f].type != 'many2many':
if vals[f] != getattr(target_record, f):
new_vals[f] = vals[f]
else:
if [x[1] for x in vals[f]] not in getattr(target_record, f).ids:
new_vals[f] = vals[f]
if new_vals:
target_record.write(new_vals)
logger.debug(
"Write %s value(s) in target record '%s' of model '%s'",
len(new_vals) + o2m_changes, target_record_id, model_name,
)
|
def function[_adjust_merged_values_orm, parameter[env, model_name, record_ids, target_record_id, field_spec]]:
constant[This method deals with the values on the records to be merged +
the target record, performing operations that make sense given the semantics
of the model.
:param field_spec: Dictionary with field names as keys and forced operation
to perform as values. If a field is not present here, default operation
will be performed.
Possible operations by field types:
* Char, Text and Html fields:
- 'merge' (default for Text and Html): content is concatenated
with an ' | ' as separator
- other value (default for Char): content on target record is preserved
* Integer, Float and Monetary fields:
- 'sum' (default for Float and Monetary): Sum all the values of
the records.
- 'avg': Perform the arithmetic average of the values of the records.
- 'max': Put the maximum of all the values.
- 'min': Put the minimum of all the values.
- other value (default for Integer): content on target record
is preserved
* Binary field:
- 'merge' (default): apply first not null value of the records if
value of target record is null, preserve target value otherwise.
- other value: content on target record is preserved
* Boolean field:
- 'and': Perform a logical AND over all values.
- 'or': Perform a logical OR over all values.
- other value (default): content on target record is preserved
* Date and Datetime fields:
- 'max': Put the maximum of all the values.
- 'min': Put the minimum of all the values.
- other value (default): content on target record is preserved
* Many2one fields:
- 'merge' (default): apply first not null value of the records if
value of target record is null, preserve target value otherwise.
- other value: content on target record is preserved
* Many2many fields:
- 'merge' (default): combine all the values
- other value: content on target record is preserved
* One2many fields:
- 'merge' (default): combine all the values
- other value: content on target record is preserved
* Reference fields:
- any value: content on target record is preserved
* Selection fields:
- any value: content on target record is preserved
]
variable[model] assign[=] call[name[env]][name[model_name]]
variable[fields] assign[=] call[name[model]._fields.values, parameter[]]
variable[all_records] assign[=] call[name[model].browse, parameter[binary_operation[call[name[tuple], parameter[name[record_ids]]] + tuple[[<ast.Name object at 0x7da18f09dc90>]]]]]
variable[target_record] assign[=] call[name[model].browse, parameter[name[target_record_id]]]
variable[vals] assign[=] dictionary[[], []]
variable[o2m_changes] assign[=] constant[0]
for taget[name[field]] in starred[name[fields]] begin[:]
if <ast.BoolOp object at 0x7da18f09fb80> begin[:]
continue
variable[op] assign[=] call[name[field_spec].get, parameter[name[field].name, constant[False]]]
variable[l] assign[=] call[name[all_records].mapped, parameter[name[field].name]]
if compare[name[field].type in tuple[[<ast.Constant object at 0x7da18f09db40>, <ast.Constant object at 0x7da18f09da20>, <ast.Constant object at 0x7da18f09f070>]]] begin[:]
if <ast.UnaryOp object at 0x7da18f09f610> begin[:]
variable[op] assign[=] <ast.IfExp object at 0x7da18f09dd50>
if compare[name[op] equal[==] constant[merge]] begin[:]
variable[l] assign[=] call[name[filter], parameter[<ast.Lambda object at 0x7da18f09ee90>, name[l]]]
call[name[vals]][name[field].name] assign[=] call[constant[ | ].join, parameter[name[l]]]
variable[new_vals] assign[=] dictionary[[], []]
for taget[name[f]] in starred[name[vals]] begin[:]
if compare[call[name[model]._fields][name[f]].type not_equal[!=] constant[many2many]] begin[:]
if compare[call[name[vals]][name[f]] not_equal[!=] call[name[getattr], parameter[name[target_record], name[f]]]] begin[:]
call[name[new_vals]][name[f]] assign[=] call[name[vals]][name[f]]
if name[new_vals] begin[:]
call[name[target_record].write, parameter[name[new_vals]]]
call[name[logger].debug, parameter[constant[Write %s value(s) in target record '%s' of model '%s'], binary_operation[call[name[len], parameter[name[new_vals]]] + name[o2m_changes]], name[target_record_id], name[model_name]]]
|
keyword[def] identifier[_adjust_merged_values_orm] ( identifier[env] , identifier[model_name] , identifier[record_ids] , identifier[target_record_id] ,
identifier[field_spec] ):
literal[string]
identifier[model] = identifier[env] [ identifier[model_name] ]
identifier[fields] = identifier[model] . identifier[_fields] . identifier[values] ()
identifier[all_records] = identifier[model] . identifier[browse] ( identifier[tuple] ( identifier[record_ids] )+( identifier[target_record_id] ,))
identifier[target_record] = identifier[model] . identifier[browse] ( identifier[target_record_id] )
identifier[vals] ={}
identifier[o2m_changes] = literal[int]
keyword[for] identifier[field] keyword[in] identifier[fields] :
keyword[if] keyword[not] identifier[field] . identifier[store] keyword[or] identifier[field] . identifier[compute] keyword[or] identifier[field] . identifier[related] :
keyword[continue]
identifier[op] = identifier[field_spec] . identifier[get] ( identifier[field] . identifier[name] , keyword[False] )
identifier[l] = identifier[all_records] . identifier[mapped] ( identifier[field] . identifier[name] )
keyword[if] identifier[field] . identifier[type] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[if] keyword[not] identifier[op] :
identifier[op] = literal[string] keyword[if] identifier[field] . identifier[type] == literal[string] keyword[else] literal[string]
keyword[if] identifier[op] == literal[string] :
identifier[l] = identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] keyword[is] keyword[not] keyword[False] , identifier[l] )
identifier[vals] [ identifier[field] . identifier[name] ]= literal[string] . identifier[join] ( identifier[l] )
keyword[elif] identifier[field] . identifier[type] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[if] keyword[not] identifier[op] :
identifier[op] = literal[string] keyword[if] identifier[field] . identifier[type] == literal[string] keyword[else] literal[string]
keyword[if] identifier[op] == literal[string] :
identifier[vals] [ identifier[field] . identifier[name] ]= identifier[sum] ( identifier[l] )
keyword[elif] identifier[op] == literal[string] :
identifier[vals] [ identifier[field] . identifier[name] ]= identifier[sum] ( identifier[l] )/ identifier[len] ( identifier[l] )
keyword[elif] identifier[op] == literal[string] :
identifier[vals] [ identifier[field] . identifier[name] ]= identifier[max] ( identifier[l] )
keyword[elif] identifier[op] == literal[string] :
identifier[vals] [ identifier[field] . identifier[name] ]= identifier[min] ( identifier[l] )
keyword[elif] identifier[field] . identifier[type] == literal[string] :
identifier[op] = identifier[op] keyword[or] literal[string]
keyword[if] identifier[op] == literal[string] :
identifier[vals] [ identifier[field] . identifier[name] ]= identifier[functools] . identifier[reduce] ( keyword[lambda] identifier[x] , identifier[y] : identifier[x] & identifier[y] , identifier[l] )
keyword[elif] identifier[op] == literal[string] :
identifier[vals] [ identifier[field] . identifier[name] ]= identifier[functools] . identifier[reduce] ( keyword[lambda] identifier[x] , identifier[y] : identifier[x] | identifier[y] , identifier[l] )
keyword[elif] identifier[field] . identifier[type] keyword[in] ( literal[string] , literal[string] ):
keyword[if] identifier[op] :
identifier[l] = identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] keyword[is] keyword[not] keyword[False] , identifier[l] )
identifier[op] = identifier[op] keyword[or] literal[string]
keyword[if] identifier[op] == literal[string] :
identifier[vals] [ identifier[field] . identifier[name] ]= identifier[max] ( identifier[l] )
keyword[elif] identifier[op] == literal[string] :
identifier[vals] [ identifier[field] . identifier[name] ]= identifier[min] ( identifier[l] )
keyword[elif] identifier[field] . identifier[type] == literal[string] :
identifier[op] = identifier[op] keyword[or] literal[string]
keyword[if] identifier[op] == literal[string] :
identifier[l] = identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] keyword[is] keyword[not] keyword[False] , identifier[l] )
identifier[vals] [ identifier[field] . identifier[name] ]=[( literal[int] , identifier[x] . identifier[id] ) keyword[for] identifier[x] keyword[in] identifier[l] ]
keyword[elif] identifier[field] . identifier[type] == literal[string] :
identifier[op] = identifier[op] keyword[or] literal[string]
keyword[if] identifier[op] == literal[string] :
identifier[o2m_changes] += literal[int]
identifier[l] . identifier[write] ({ identifier[field] . identifier[inverse_name] : identifier[target_record_id] })
keyword[elif] identifier[field] . identifier[type] == literal[string] :
identifier[op] = identifier[op] keyword[or] literal[string]
keyword[if] identifier[op] == literal[string] :
identifier[l] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[l] keyword[if] identifier[x] ]
keyword[if] keyword[not] identifier[getattr] ( identifier[target_record] , identifier[field] . identifier[name] ) keyword[and] identifier[l] :
identifier[vals] [ identifier[field] . identifier[name] ]= identifier[l] [ literal[int] ]
keyword[elif] identifier[field] . identifier[type] == literal[string] :
identifier[op] = identifier[op] keyword[or] literal[string]
keyword[if] identifier[op] == literal[string] :
keyword[if] keyword[not] identifier[getattr] ( identifier[target_record] , identifier[field] . identifier[name] ) keyword[and] identifier[l] :
identifier[vals] [ identifier[field] . identifier[name] ]= identifier[l] [ literal[int] ]
identifier[new_vals] ={}
keyword[for] identifier[f] keyword[in] identifier[vals] :
keyword[if] identifier[model] . identifier[_fields] [ identifier[f] ]. identifier[type] != literal[string] :
keyword[if] identifier[vals] [ identifier[f] ]!= identifier[getattr] ( identifier[target_record] , identifier[f] ):
identifier[new_vals] [ identifier[f] ]= identifier[vals] [ identifier[f] ]
keyword[else] :
keyword[if] [ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[vals] [ identifier[f] ]] keyword[not] keyword[in] identifier[getattr] ( identifier[target_record] , identifier[f] ). identifier[ids] :
identifier[new_vals] [ identifier[f] ]= identifier[vals] [ identifier[f] ]
keyword[if] identifier[new_vals] :
identifier[target_record] . identifier[write] ( identifier[new_vals] )
identifier[logger] . identifier[debug] (
literal[string] ,
identifier[len] ( identifier[new_vals] )+ identifier[o2m_changes] , identifier[target_record_id] , identifier[model_name] ,
)
|
def _adjust_merged_values_orm(env, model_name, record_ids, target_record_id, field_spec):
"""This method deals with the values on the records to be merged +
the target record, performing operations that make sense given the semantics
of the model.
:param field_spec: Dictionary with field names as keys and forced operation
to perform as values. If a field is not present here, default operation
will be performed.
Possible operations by field types:
* Char, Text and Html fields:
- 'merge' (default for Text and Html): content is concatenated
with an ' | ' as separator
- other value (default for Char): content on target record is preserved
* Integer, Float and Monetary fields:
- 'sum' (default for Float and Monetary): Sum all the values of
the records.
- 'avg': Perform the arithmetic average of the values of the records.
- 'max': Put the maximum of all the values.
- 'min': Put the minimum of all the values.
- other value (default for Integer): content on target record
is preserved
* Binary field:
- 'merge' (default): apply first not null value of the records if
value of target record is null, preserve target value otherwise.
- other value: content on target record is preserved
* Boolean field:
- 'and': Perform a logical AND over all values.
- 'or': Perform a logical OR over all values.
- other value (default): content on target record is preserved
* Date and Datetime fields:
- 'max': Put the maximum of all the values.
- 'min': Put the minimum of all the values.
- other value (default): content on target record is preserved
* Many2one fields:
- 'merge' (default): apply first not null value of the records if
value of target record is null, preserve target value otherwise.
- other value: content on target record is preserved
* Many2many fields:
- 'merge' (default): combine all the values
- other value: content on target record is preserved
* One2many fields:
- 'merge' (default): combine all the values
- other value: content on target record is preserved
* Reference fields:
- any value: content on target record is preserved
* Selection fields:
- any value: content on target record is preserved
"""
model = env[model_name]
fields = model._fields.values()
all_records = model.browse(tuple(record_ids) + (target_record_id,))
target_record = model.browse(target_record_id)
vals = {}
o2m_changes = 0
for field in fields:
if not field.store or field.compute or field.related:
continue # don't do anything on these cases # depends on [control=['if'], data=[]]
op = field_spec.get(field.name, False)
l = all_records.mapped(field.name)
if field.type in ('char', 'text', 'html'):
if not op:
op = 'other' if field.type == 'char' else 'merge' # depends on [control=['if'], data=[]]
if op == 'merge':
l = filter(lambda x: x is not False, l)
vals[field.name] = ' | '.join(l) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif field.type in ('integer', 'float', 'monetary'):
if not op:
op = 'other' if field.type == 'integer' else 'sum' # depends on [control=['if'], data=[]]
if op == 'sum':
vals[field.name] = sum(l) # depends on [control=['if'], data=[]]
elif op == 'avg':
vals[field.name] = sum(l) / len(l) # depends on [control=['if'], data=[]]
elif op == 'max':
vals[field.name] = max(l) # depends on [control=['if'], data=[]]
elif op == 'min':
vals[field.name] = min(l) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif field.type == 'boolean':
op = op or 'other'
if op == 'and':
vals[field.name] = functools.reduce(lambda x, y: x & y, l) # depends on [control=['if'], data=[]]
elif op == 'or':
vals[field.name] = functools.reduce(lambda x, y: x | y, l) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif field.type in ('date', 'datetime'):
if op:
l = filter(lambda x: x is not False, l) # depends on [control=['if'], data=[]]
op = op or 'other'
if op == 'max':
vals[field.name] = max(l) # depends on [control=['if'], data=[]]
elif op == 'min':
vals[field.name] = min(l) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif field.type == 'many2many':
op = op or 'merge'
if op == 'merge':
l = filter(lambda x: x is not False, l)
vals[field.name] = [(4, x.id) for x in l] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif field.type == 'one2many':
op = op or 'merge'
if op == 'merge':
o2m_changes += 1
l.write({field.inverse_name: target_record_id}) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif field.type == 'binary':
op = op or 'merge'
if op == 'merge':
l = [x for x in l if x]
if not getattr(target_record, field.name) and l:
vals[field.name] = l[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif field.type == 'many2one':
op = op or 'merge'
if op == 'merge':
if not getattr(target_record, field.name) and l:
vals[field.name] = l[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']]
# Curate values that haven't changed
new_vals = {}
for f in vals:
if model._fields[f].type != 'many2many':
if vals[f] != getattr(target_record, f):
new_vals[f] = vals[f] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif [x[1] for x in vals[f]] not in getattr(target_record, f).ids:
new_vals[f] = vals[f] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
if new_vals:
target_record.write(new_vals)
logger.debug("Write %s value(s) in target record '%s' of model '%s'", len(new_vals) + o2m_changes, target_record_id, model_name) # depends on [control=['if'], data=[]]
|
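
A few of the pure-Python reductions used above, isolated and runnable (the rest of the method needs a live Odoo environment). The boolean 'and'/'or' operations reduce with bitwise operators, which on bools behave as logical AND/OR.

import functools

values = [True, False, True]
print(functools.reduce(lambda x, y: x & y, values))  # False  (op == 'and')
print(functools.reduce(lambda x, y: x | y, values))  # True   (op == 'or')
print(sum([1.5, 2.5]) / len([1.5, 2.5]))             # 2.0    (op == 'avg')

# Hypothetical invocation (model and field names invented):
# _adjust_merged_values_orm(env, 'res.partner', [10, 11], 42,
#                           {'credit_limit': 'avg', 'comment': 'other'})
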
def max_sharpe(self, risk_free_rate=0.02):
"""
Maximise the Sharpe Ratio. The result is also referred to as the tangency portfolio,
as it is the tangent to the efficient frontier curve that intercepts the risk-free
rate.
:param risk_free_rate: risk-free rate of borrowing/lending, defaults to 0.02
:type risk_free_rate: float, optional
:raises ValueError: if ``risk_free_rate`` is non-numeric
:return: asset weights for the Sharpe-maximising portfolio
:rtype: dict
"""
if not isinstance(risk_free_rate, (int, float)):
raise ValueError("risk_free_rate should be numeric")
args = (self.expected_returns, self.cov_matrix, self.gamma, risk_free_rate)
result = sco.minimize(
objective_functions.negative_sharpe,
x0=self.initial_guess,
args=args,
method="SLSQP",
bounds=self.bounds,
constraints=self.constraints,
)
self.weights = result["x"]
return dict(zip(self.tickers, self.weights))
|
def function[max_sharpe, parameter[self, risk_free_rate]]:
constant[
Maximise the Sharpe Ratio. The result is also referred to as the tangency portfolio,
as it is the tangent to the efficient frontier curve that intercepts the risk-free
rate.
:param risk_free_rate: risk-free rate of borrowing/lending, defaults to 0.02
:type risk_free_rate: float, optional
:raises ValueError: if ``risk_free_rate`` is non-numeric
:return: asset weights for the Sharpe-maximising portfolio
:rtype: dict
]
if <ast.UnaryOp object at 0x7da18dc98b50> begin[:]
<ast.Raise object at 0x7da18dc99a50>
variable[args] assign[=] tuple[[<ast.Attribute object at 0x7da18dc99390>, <ast.Attribute object at 0x7da18dc98f70>, <ast.Attribute object at 0x7da18dc988b0>, <ast.Name object at 0x7da18dc9a830>]]
variable[result] assign[=] call[name[sco].minimize, parameter[name[objective_functions].negative_sharpe]]
name[self].weights assign[=] call[name[result]][constant[x]]
return[call[name[dict], parameter[call[name[zip], parameter[name[self].tickers, name[self].weights]]]]]
|
keyword[def] identifier[max_sharpe] ( identifier[self] , identifier[risk_free_rate] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[risk_free_rate] ,( identifier[int] , identifier[float] )):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[args] =( identifier[self] . identifier[expected_returns] , identifier[self] . identifier[cov_matrix] , identifier[self] . identifier[gamma] , identifier[risk_free_rate] )
identifier[result] = identifier[sco] . identifier[minimize] (
identifier[objective_functions] . identifier[negative_sharpe] ,
identifier[x0] = identifier[self] . identifier[initial_guess] ,
identifier[args] = identifier[args] ,
identifier[method] = literal[string] ,
identifier[bounds] = identifier[self] . identifier[bounds] ,
identifier[constraints] = identifier[self] . identifier[constraints] ,
)
identifier[self] . identifier[weights] = identifier[result] [ literal[string] ]
keyword[return] identifier[dict] ( identifier[zip] ( identifier[self] . identifier[tickers] , identifier[self] . identifier[weights] ))
|
def max_sharpe(self, risk_free_rate=0.02):
"""
Maximise the Sharpe Ratio. The result is also referred to as the tangency portfolio,
as it is the tangent to the efficient frontier curve that intercepts the risk-free
rate.
:param risk_free_rate: risk-free rate of borrowing/lending, defaults to 0.02
:type risk_free_rate: float, optional
:raises ValueError: if ``risk_free_rate`` is non-numeric
:return: asset weights for the Sharpe-maximising portfolio
:rtype: dict
"""
if not isinstance(risk_free_rate, (int, float)):
raise ValueError('risk_free_rate should be numeric') # depends on [control=['if'], data=[]]
args = (self.expected_returns, self.cov_matrix, self.gamma, risk_free_rate)
result = sco.minimize(objective_functions.negative_sharpe, x0=self.initial_guess, args=args, method='SLSQP', bounds=self.bounds, constraints=self.constraints)
self.weights = result['x']
return dict(zip(self.tickers, self.weights))
|
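
A self-contained SLSQP sketch mirroring the sco.minimize() call pattern above, with a toy objective standing in for negative Sharpe; the tickers and the sum-to-one constraint are illustrative choices, not the library's own setup.

import numpy as np
import scipy.optimize as sco

n = 3
result = sco.minimize(
    lambda w: np.sum((w - 1.0 / n) ** 2),  # toy objective, not negative Sharpe
    x0=np.full(n, 1.0 / n),
    method='SLSQP',
    bounds=[(0.0, 1.0)] * n,
    constraints=[{'type': 'eq', 'fun': lambda w: np.sum(w) - 1.0}],
)
print(dict(zip(['AAA', 'BBB', 'CCC'], result['x'])))  # made-up tickers
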
def function_call_prepare_action(self, text, loc, fun):
"""Code executed after recognising a function call (type and function name)"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("FUN_PREP:",fun)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.lookup_symbol(fun.name, SharedData.KINDS.FUNCTION)
if index == None:
raise SemanticException("'%s' is not a function" % fun.name)
#save any previous function call data (for nested function calls)
self.function_call_stack.append(self.function_call_index)
self.function_call_index = index
self.function_arguments_stack.append(self.function_arguments[:])
del self.function_arguments[:]
self.codegen.save_used_registers()
|
def function[function_call_prepare_action, parameter[self, text, loc, fun]]:
constant[Code executed after recognising a function call (type and function name)]
call[name[exshared].setpos, parameter[name[loc], name[text]]]
if compare[name[DEBUG] greater[>] constant[0]] begin[:]
call[name[print], parameter[constant[FUN_PREP:], name[fun]]]
if compare[name[DEBUG] equal[==] constant[2]] begin[:]
call[name[self].symtab.display, parameter[]]
if compare[name[DEBUG] greater[>] constant[2]] begin[:]
return[None]
variable[index] assign[=] call[name[self].symtab.lookup_symbol, parameter[name[fun].name, name[SharedData].KINDS.FUNCTION]]
if compare[name[index] equal[==] constant[None]] begin[:]
<ast.Raise object at 0x7da18f09f5e0>
call[name[self].function_call_stack.append, parameter[name[self].function_call_index]]
name[self].function_call_index assign[=] name[index]
call[name[self].function_arguments_stack.append, parameter[call[name[self].function_arguments][<ast.Slice object at 0x7da1b23444c0>]]]
<ast.Delete object at 0x7da1b2346b90>
call[name[self].codegen.save_used_registers, parameter[]]
|
keyword[def] identifier[function_call_prepare_action] ( identifier[self] , identifier[text] , identifier[loc] , identifier[fun] ):
literal[string]
identifier[exshared] . identifier[setpos] ( identifier[loc] , identifier[text] )
keyword[if] identifier[DEBUG] > literal[int] :
identifier[print] ( literal[string] , identifier[fun] )
keyword[if] identifier[DEBUG] == literal[int] : identifier[self] . identifier[symtab] . identifier[display] ()
keyword[if] identifier[DEBUG] > literal[int] : keyword[return]
identifier[index] = identifier[self] . identifier[symtab] . identifier[lookup_symbol] ( identifier[fun] . identifier[name] , identifier[SharedData] . identifier[KINDS] . identifier[FUNCTION] )
keyword[if] identifier[index] == keyword[None] :
keyword[raise] identifier[SemanticException] ( literal[string] % identifier[fun] . identifier[name] )
identifier[self] . identifier[function_call_stack] . identifier[append] ( identifier[self] . identifier[function_call_index] )
identifier[self] . identifier[function_call_index] = identifier[index]
identifier[self] . identifier[function_arguments_stack] . identifier[append] ( identifier[self] . identifier[function_arguments] [:])
keyword[del] identifier[self] . identifier[function_arguments] [:]
identifier[self] . identifier[codegen] . identifier[save_used_registers] ()
|
def function_call_prepare_action(self, text, loc, fun):
"""Code executed after recognising a function call (type and function name)"""
exshared.setpos(loc, text)
if DEBUG > 0:
print('FUN_PREP:', fun)
if DEBUG == 2:
self.symtab.display() # depends on [control=['if'], data=[]]
if DEBUG > 2:
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['DEBUG']]
index = self.symtab.lookup_symbol(fun.name, SharedData.KINDS.FUNCTION)
if index == None:
raise SemanticException("'%s' is not a function" % fun.name) # depends on [control=['if'], data=[]] #save any previous function call data (for nested function calls)
self.function_call_stack.append(self.function_call_index)
self.function_call_index = index
self.function_arguments_stack.append(self.function_arguments[:])
del self.function_arguments[:]
self.codegen.save_used_registers()
|
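
A toy illustration of the save/restore discipline that the two stacks above implement for nested calls, detached from the symbol table and code generator.

call_stack = []
current_call = None

def enter_call(index):
    global current_call
    call_stack.append(current_call)  # save the enclosing call's context
    current_call = index

def leave_call():
    global current_call
    current_call = call_stack.pop()  # restore it when the inner call ends

enter_call('outer')
enter_call('inner')   # nested call: 'outer' is parked on the stack
leave_call()
print(current_call)   # outer
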
def process_new_issues(self, volumes, existing_issues):
"""Takes a dict of existing volumes missing tags and a dict of existing issues, and finds any new or updated
issues.
Args:
volumes (:obj:`dict` of `str`: `EBSVolume`): Dict of current volumes with issues
existing_issues (:obj:`dict` of `str`: `EBSVolumeAuditIssue`): Current list of issues
Returns:
:obj:`dict` of `str`: `EBSVolumeAuditIssue`
"""
new_issues = {}
for issue_id, volume in volumes.items():
state = EBSIssueState.DETECTED.value
if issue_id in existing_issues:
issue = existing_issues[issue_id]
data = {
'state': state,
'notes': issue.notes,
'last_notice': issue.last_notice
}
if issue.update(data):
new_issues.setdefault(issue.volume.account, []).append(issue)
self.log.debug('Updated EBSVolumeAuditIssue {}'.format(
issue_id
))
else:
properties = {
'volume_id': volume.id,
'account_id': volume.account_id,
'location': volume.location,
'state': state,
'last_change': datetime.now(),
'last_notice': None,
'notes': []
}
issue = EBSVolumeAuditIssue.create(issue_id, properties=properties)
new_issues.setdefault(issue.volume.account, []).append(issue)
return new_issues
|
def function[process_new_issues, parameter[self, volumes, existing_issues]]:
constant[Takes a dict of volumes with issues and a dict of existing issues, and finds any new or updated
issues.
Args:
volumes (:obj:`dict` of `str`: `EBSVolume`): Dict of current volumes with issues
existing_issues (:obj:`dict` of `str`: `EBSVolumeAuditIssue`): Current list of issues
Returns:
:obj:`dict` of `str`: `EBSVolumeAuditIssue`
]
variable[new_issues] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1e917b0>, <ast.Name object at 0x7da1b1e918a0>]]] in starred[call[name[volumes].items, parameter[]]] begin[:]
variable[state] assign[=] name[EBSIssueState].DETECTED.value
if compare[name[issue_id] in name[existing_issues]] begin[:]
variable[issue] assign[=] call[name[existing_issues]][name[issue_id]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b1e93f10>, <ast.Constant object at 0x7da1b1e93a30>, <ast.Constant object at 0x7da1b1e90cd0>], [<ast.Name object at 0x7da1b1e90ac0>, <ast.Attribute object at 0x7da1b1e91ea0>, <ast.Attribute object at 0x7da1b1e91bd0>]]
if call[name[issue].update, parameter[name[data]]] begin[:]
call[call[name[new_issues].setdefault, parameter[name[issue].volume.account, list[[]]]].append, parameter[name[issue]]]
call[name[self].log.debug, parameter[call[constant[Updated EBSVolumeAuditIssue {}].format, parameter[name[issue_id]]]]]
return[name[new_issues]]
|
keyword[def] identifier[process_new_issues] ( identifier[self] , identifier[volumes] , identifier[existing_issues] ):
literal[string]
identifier[new_issues] ={}
keyword[for] identifier[issue_id] , identifier[volume] keyword[in] identifier[volumes] . identifier[items] ():
identifier[state] = identifier[EBSIssueState] . identifier[DETECTED] . identifier[value]
keyword[if] identifier[issue_id] keyword[in] identifier[existing_issues] :
identifier[issue] = identifier[existing_issues] [ identifier[issue_id] ]
identifier[data] ={
literal[string] : identifier[state] ,
literal[string] : identifier[issue] . identifier[notes] ,
literal[string] : identifier[issue] . identifier[last_notice]
}
keyword[if] identifier[issue] . identifier[update] ( identifier[data] ):
identifier[new_issues] . identifier[setdefault] ( identifier[issue] . identifier[volume] . identifier[account] ,[]). identifier[append] ( identifier[issue] )
identifier[self] . identifier[log] . identifier[debug] ( literal[string] . identifier[format] (
identifier[issue_id]
))
keyword[else] :
identifier[properties] ={
literal[string] : identifier[volume] . identifier[id] ,
literal[string] : identifier[volume] . identifier[account_id] ,
literal[string] : identifier[volume] . identifier[location] ,
literal[string] : identifier[state] ,
literal[string] : identifier[datetime] . identifier[now] (),
literal[string] : keyword[None] ,
literal[string] :[]
}
identifier[issue] = identifier[EBSVolumeAuditIssue] . identifier[create] ( identifier[issue_id] , identifier[properties] = identifier[properties] )
identifier[new_issues] . identifier[setdefault] ( identifier[issue] . identifier[volume] . identifier[account] ,[]). identifier[append] ( identifier[issue] )
keyword[return] identifier[new_issues]
|
def process_new_issues(self, volumes, existing_issues):
"""Takes a dict of existing volumes missing tags and a dict of existing issues, and finds any new or updated
issues.
Args:
volumes (:obj:`dict` of `str`: `EBSVolume`): Dict of current volumes with issues
existing_issues (:obj:`dict` of `str`: `EBSVolumeAuditIssue`): Current list of issues
Returns:
:obj:`dict` of `str`: `EBSVolumeAuditIssue`
"""
new_issues = {}
for (issue_id, volume) in volumes.items():
state = EBSIssueState.DETECTED.value
if issue_id in existing_issues:
issue = existing_issues[issue_id]
data = {'state': state, 'notes': issue.notes, 'last_notice': issue.last_notice}
if issue.update(data):
new_issues.setdefault(issue.volume.account, []).append(issue)
self.log.debug('Updated EBSVolumeAuditIssue {}'.format(issue_id)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['issue_id', 'existing_issues']]
else:
properties = {'volume_id': volume.id, 'account_id': volume.account_id, 'location': volume.location, 'state': state, 'last_change': datetime.now(), 'last_notice': None, 'notes': []}
issue = EBSVolumeAuditIssue.create(issue_id, properties=properties)
new_issues.setdefault(issue.volume.account, []).append(issue) # depends on [control=['for'], data=[]]
return new_issues
|
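A minimal driver sketch for process_new_issues; auditor, volume, and known_issue below are hypothetical stand-ins for the real inventory objects.

# volumes maps issue_id -> EBSVolume, existing maps issue_id -> EBSVolumeAuditIssue
volumes = {'vol-1234/missing-tags': volume}        # hypothetical issue id
existing = {'vol-1234/missing-tags': known_issue}
new_or_updated = auditor.process_new_issues(volumes, existing)
for account, issues in new_or_updated.items():
    print(account, [i.volume.id for i in issues])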
def search(self, base_dn, search_filter, attributes=()):
"""Perform an AD search
:param str base_dn: The base DN to search within
:param str search_filter: The search filter to apply, such as:
*objectClass=person*
:param list attributes: Object attributes to populate, defaults to all
"""
results = []
page = 0
while page == 0 or self.sprc.cookie:
page += 1
#pylint: disable=no-member
message_id = self.ldap.search_ext(base_dn, ldap.SCOPE_SUBTREE,
search_filter, attributes,
serverctrls=[self.sprc])
#pylint: enable=no-member
data, server_controls = self.ldap.result3(message_id)[1::2]
self.sprc.cookie = server_controls[0].cookie
logging.debug('%s - Page %s results: %s', \
self.__class__.__name__, page, ', '.join(k[0] for k in data))
results += [u for u in data]
return results
|
def function[search, parameter[self, base_dn, search_filter, attributes]]:
constant[Perform an AD search
:param str base_dn: The base DN to search within
:param str search_filter: The search filter to apply, such as:
*objectClass=person*
:param list attributes: Object attributes to populate, defaults to all
]
variable[results] assign[=] list[[]]
variable[page] assign[=] constant[0]
while <ast.BoolOp object at 0x7da1b09cd6c0> begin[:]
<ast.AugAssign object at 0x7da1b09cf100>
variable[message_id] assign[=] call[name[self].ldap.search_ext, parameter[name[base_dn], name[ldap].SCOPE_SUBTREE, name[search_filter], name[attributes]]]
<ast.Tuple object at 0x7da1b09ce4d0> assign[=] call[call[name[self].ldap.result3, parameter[name[message_id]]]][<ast.Slice object at 0x7da1b09cead0>]
name[self].sprc.cookie assign[=] call[name[server_controls]][constant[0]].cookie
call[name[logging].debug, parameter[constant[%s - Page %s results: %s], name[self].__class__.__name__, name[page], call[constant[, ].join, parameter[<ast.GeneratorExp object at 0x7da1b09eafe0>]]]]
<ast.AugAssign object at 0x7da1b09ea7d0>
return[name[results]]
|
keyword[def] identifier[search] ( identifier[self] , identifier[base_dn] , identifier[search_filter] , identifier[attributes] =()):
literal[string]
identifier[results] =[]
identifier[page] = literal[int]
keyword[while] identifier[page] == literal[int] keyword[or] identifier[self] . identifier[sprc] . identifier[cookie] :
identifier[page] += literal[int]
identifier[message_id] = identifier[self] . identifier[ldap] . identifier[search_ext] ( identifier[base_dn] , identifier[ldap] . identifier[SCOPE_SUBTREE] ,
identifier[search_filter] , identifier[attributes] ,
identifier[serverctrls] =[ identifier[self] . identifier[sprc] ])
identifier[data] , identifier[server_controls] = identifier[self] . identifier[ldap] . identifier[result3] ( identifier[message_id] )[ literal[int] :: literal[int] ]
identifier[self] . identifier[sprc] . identifier[cookie] = identifier[server_controls] [ literal[int] ]. identifier[cookie]
identifier[logging] . identifier[debug] ( literal[string] , identifier[self] . identifier[__class__] . identifier[__name__] , identifier[page] , literal[string] . identifier[join] ( identifier[k] [ literal[int] ] keyword[for] identifier[k] keyword[in] identifier[data] ))
identifier[results] +=[ identifier[u] keyword[for] identifier[u] keyword[in] identifier[data] ]
keyword[return] identifier[results]
|
def search(self, base_dn, search_filter, attributes=()):
"""Perform an AD search
:param str base_dn: The base DN to search within
:param str search_filter: The search filter to apply, such as:
*objectClass=person*
:param list attributes: Object attributes to populate, defaults to all
"""
results = []
page = 0
while page == 0 or self.sprc.cookie:
page += 1
#pylint: disable=no-member
message_id = self.ldap.search_ext(base_dn, ldap.SCOPE_SUBTREE, search_filter, attributes, serverctrls=[self.sprc])
#pylint: enable=no-member
(data, server_controls) = self.ldap.result3(message_id)[1::2]
self.sprc.cookie = server_controls[0].cookie
logging.debug('%s - Page %s results: %s', self.__class__.__name__, page, ', '.join((k[0] for k in data)))
results += [u for u in data] # depends on [control=['while'], data=[]]
return results
|
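The pager above relies on self.sprc being a paged-results control whose cookie the server refreshes after every page; a plausible setup with python-ldap (server URI and credentials are assumptions) looks like:

import ldap
from ldap.controls import SimplePagedResultsControl

conn = ldap.initialize('ldap://ad.example.com')    # hypothetical server
conn.simple_bind_s('user@example.com', 'secret')   # hypothetical credentials
sprc = SimplePagedResultsControl(True, size=500, cookie='')
# With self.ldap = conn and self.sprc = sprc, search() keeps requesting
# pages until the server hands back an empty cookie.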
def reps_to_intensity(reps, slope=-4.8, constant=97.5, quadratic=True):
"""A function mapping from repetitions in the range 1 to 12
to intensities in the range 0 to 100.
Parameters
----------
reps
The number of repetitions to map to the intensity range.
slope
Slope for the linear function.
constant
        Constant for the linear function.
quadratic
If 'True', add a slight quadratic offset.
Returns
-------
intensity
An intensity value in the range from 0 to 100.
Examples
-------
>>> reps_to_intensity(5, slope = -5, constant = 100, quadratic = False)
80
>>> reps_to_intensity(8, slope = -5, constant = 100, quadratic = True)
67.45
>>> reps_to_intensity(8, slope = -5, constant = 100, quadratic = False)
65
"""
intensity = constant + slope * (reps - 1)
if quadratic:
return intensity + 0.05 * (reps - 1) ** 2
else:
return intensity
|
def function[reps_to_intensity, parameter[reps, slope, constant, quadratic]]:
constant[A function mapping from repetitions in the range 1 to 12
to intensities in the range 0 to 100.
Parameters
----------
reps
The number of repetitions to map to the intensity range.
slope
Slope for the linear function.
constant
        Constant for the linear function.
quadratic
If 'True', add a slight quadratic offset.
Returns
-------
intensity
An intensity value in the range from 0 to 100.
Examples
-------
>>> reps_to_intensity(5, slope = -5, constant = 100, quadratic = False)
80
>>> reps_to_intensity(8, slope = -5, constant = 100, quadratic = True)
67.45
>>> reps_to_intensity(8, slope = -5, constant = 100, quadratic = False)
65
]
variable[intensity] assign[=] binary_operation[name[constant] + binary_operation[name[slope] * binary_operation[name[reps] - constant[1]]]]
if name[quadratic] begin[:]
return[binary_operation[name[intensity] + binary_operation[constant[0.05] * binary_operation[binary_operation[name[reps] - constant[1]] ** constant[2]]]]]
|
keyword[def] identifier[reps_to_intensity] ( identifier[reps] , identifier[slope] =- literal[int] , identifier[constant] = literal[int] , identifier[quadratic] = keyword[True] ):
literal[string]
identifier[intensity] = identifier[constant] + identifier[slope] *( identifier[reps] - literal[int] )
keyword[if] identifier[quadratic] :
keyword[return] identifier[intensity] + literal[int] *( identifier[reps] - literal[int] )** literal[int]
keyword[else] :
keyword[return] identifier[intensity]
|
def reps_to_intensity(reps, slope=-4.8, constant=97.5, quadratic=True):
"""A function mapping from repetitions in the range 1 to 12
to intensities in the range 0 to 100.
Parameters
----------
reps
The number of repetitions to map to the intensity range.
slope
Slope for the linear function.
constant
        Constant for the linear function.
quadratic
If 'True', add a slight quadratic offset.
Returns
-------
intensity
An intensity value in the range from 0 to 100.
Examples
-------
>>> reps_to_intensity(5, slope = -5, constant = 100, quadratic = False)
80
>>> reps_to_intensity(8, slope = -5, constant = 100, quadratic = True)
67.45
>>> reps_to_intensity(8, slope = -5, constant = 100, quadratic = False)
65
"""
intensity = constant + slope * (reps - 1)
if quadratic:
return intensity + 0.05 * (reps - 1) ** 2 # depends on [control=['if'], data=[]]
else:
return intensity
|
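A few values under the default slope and constant make the mapping concrete; the numbers follow directly from the formula above.

for reps in (1, 5, 12):
    print(reps, round(reps_to_intensity(reps), 2))
# 1 -> 97.5, 5 -> 79.1, 12 -> 50.75 with the quadratic correction enabled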
def cmd_link_ports(self):
'''show available ports'''
ports = mavutil.auto_detect_serial(preferred_list=['*FTDI*',"*Arduino_Mega_2560*", "*3D_Robotics*", "*USB_to_UART*", '*PX4*', '*FMU*'])
for p in ports:
print("%s : %s : %s" % (p.device, p.description, p.hwid))
|
def function[cmd_link_ports, parameter[self]]:
constant[show available ports]
variable[ports] assign[=] call[name[mavutil].auto_detect_serial, parameter[]]
for taget[name[p]] in starred[name[ports]] begin[:]
call[name[print], parameter[binary_operation[constant[%s : %s : %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b23477f0>, <ast.Attribute object at 0x7da1b2345a20>, <ast.Attribute object at 0x7da1b2345cf0>]]]]]
|
keyword[def] identifier[cmd_link_ports] ( identifier[self] ):
literal[string]
identifier[ports] = identifier[mavutil] . identifier[auto_detect_serial] ( identifier[preferred_list] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ])
keyword[for] identifier[p] keyword[in] identifier[ports] :
identifier[print] ( literal[string] %( identifier[p] . identifier[device] , identifier[p] . identifier[description] , identifier[p] . identifier[hwid] ))
|
def cmd_link_ports(self):
"""show available ports"""
ports = mavutil.auto_detect_serial(preferred_list=['*FTDI*', '*Arduino_Mega_2560*', '*3D_Robotics*', '*USB_to_UART*', '*PX4*', '*FMU*'])
for p in ports:
print('%s : %s : %s' % (p.device, p.description, p.hwid)) # depends on [control=['for'], data=['p']]
|
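Outside of the MAVProxy module the same pymavlink helper can be called directly; the shortened preferred_list here is illustrative.

from pymavlink import mavutil

for p in mavutil.auto_detect_serial(preferred_list=['*FTDI*', '*PX4*']):
    print('%s : %s : %s' % (p.device, p.description, p.hwid))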
def encode(self, word):
"""Return the Russell Index (integer output) of a word.
Parameters
----------
word : str
The word to transform
Returns
-------
int
The Russell Index value
Examples
--------
>>> pe = RussellIndex()
>>> pe.encode('Christopher')
3813428
>>> pe.encode('Niall')
715
>>> pe.encode('Smith')
3614
>>> pe.encode('Schmidt')
3614
"""
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
word = word.replace('GH', '') # discard gh (rule 3)
word = word.rstrip('SZ') # discard /[sz]$/ (rule 3)
# translate according to Russell's mapping
word = ''.join(c for c in word if c in self._uc_set)
sdx = word.translate(self._trans)
# remove any 1s after the first occurrence
one = sdx.find('1') + 1
if one:
sdx = sdx[:one] + ''.join(c for c in sdx[one:] if c != '1')
# remove repeating characters
sdx = self._delete_consecutive_repeats(sdx)
# return as an int
return int(sdx) if sdx else float('NaN')
|
def function[encode, parameter[self, word]]:
constant[Return the Russell Index (integer output) of a word.
Parameters
----------
word : str
The word to transform
Returns
-------
int
The Russell Index value
Examples
--------
>>> pe = RussellIndex()
>>> pe.encode('Christopher')
3813428
>>> pe.encode('Niall')
715
>>> pe.encode('Smith')
3614
>>> pe.encode('Schmidt')
3614
]
variable[word] assign[=] call[name[unicode_normalize], parameter[constant[NFKD], call[name[text_type], parameter[call[name[word].upper, parameter[]]]]]]
variable[word] assign[=] call[name[word].replace, parameter[constant[ß], constant[SS]]]
variable[word] assign[=] call[name[word].replace, parameter[constant[GH], constant[]]]
variable[word] assign[=] call[name[word].rstrip, parameter[constant[SZ]]]
variable[word] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da2054a46d0>]]
variable[sdx] assign[=] call[name[word].translate, parameter[name[self]._trans]]
variable[one] assign[=] binary_operation[call[name[sdx].find, parameter[constant[1]]] + constant[1]]
if name[one] begin[:]
variable[sdx] assign[=] binary_operation[call[name[sdx]][<ast.Slice object at 0x7da2054a4e50>] + call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da2054a4a00>]]]
variable[sdx] assign[=] call[name[self]._delete_consecutive_repeats, parameter[name[sdx]]]
return[<ast.IfExp object at 0x7da2054a5030>]
|
keyword[def] identifier[encode] ( identifier[self] , identifier[word] ):
literal[string]
identifier[word] = identifier[unicode_normalize] ( literal[string] , identifier[text_type] ( identifier[word] . identifier[upper] ()))
identifier[word] = identifier[word] . identifier[replace] ( literal[string] , literal[string] )
identifier[word] = identifier[word] . identifier[replace] ( literal[string] , literal[string] )
identifier[word] = identifier[word] . identifier[rstrip] ( literal[string] )
identifier[word] = literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[word] keyword[if] identifier[c] keyword[in] identifier[self] . identifier[_uc_set] )
identifier[sdx] = identifier[word] . identifier[translate] ( identifier[self] . identifier[_trans] )
identifier[one] = identifier[sdx] . identifier[find] ( literal[string] )+ literal[int]
keyword[if] identifier[one] :
identifier[sdx] = identifier[sdx] [: identifier[one] ]+ literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[sdx] [ identifier[one] :] keyword[if] identifier[c] != literal[string] )
identifier[sdx] = identifier[self] . identifier[_delete_consecutive_repeats] ( identifier[sdx] )
keyword[return] identifier[int] ( identifier[sdx] ) keyword[if] identifier[sdx] keyword[else] identifier[float] ( literal[string] )
|
def encode(self, word):
"""Return the Russell Index (integer output) of a word.
Parameters
----------
word : str
The word to transform
Returns
-------
int
The Russell Index value
Examples
--------
>>> pe = RussellIndex()
>>> pe.encode('Christopher')
3813428
>>> pe.encode('Niall')
715
>>> pe.encode('Smith')
3614
>>> pe.encode('Schmidt')
3614
"""
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
word = word.replace('GH', '') # discard gh (rule 3)
word = word.rstrip('SZ') # discard /[sz]$/ (rule 3)
# translate according to Russell's mapping
word = ''.join((c for c in word if c in self._uc_set))
sdx = word.translate(self._trans)
# remove any 1s after the first occurrence
one = sdx.find('1') + 1
if one:
sdx = sdx[:one] + ''.join((c for c in sdx[one:] if c != '1')) # depends on [control=['if'], data=[]]
# remove repeating characters
sdx = self._delete_consecutive_repeats(sdx)
# return as an int
return int(sdx) if sdx else float('NaN')
|
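The doctests above double as a usage sketch, assuming the surrounding RussellIndex class:

pe = RussellIndex()
print(pe.encode('Christopher'))   # 3813428
print(pe.encode('Smith'))         # 3614
print(pe.encode('Schmidt'))       # 3614, i.e. the same index as 'Smith'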
def get_subpath(self, subpath: str):
"""Search a file or directory relative to the base path"""
for d in self._path:
if os.path.exists(os.path.join(d, subpath)):
return os.path.join(d, subpath)
raise FileNotFoundError
|
def function[get_subpath, parameter[self, subpath]]:
    constant[Search for a file or directory relative to the base path]
for taget[name[d]] in starred[name[self]._path] begin[:]
if call[name[os].path.exists, parameter[call[name[os].path.join, parameter[name[d], name[subpath]]]]] begin[:]
return[call[name[os].path.join, parameter[name[d], name[subpath]]]]
<ast.Raise object at 0x7da1b1074d30>
|
keyword[def] identifier[get_subpath] ( identifier[self] , identifier[subpath] : identifier[str] ):
literal[string]
keyword[for] identifier[d] keyword[in] identifier[self] . identifier[_path] :
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[d] , identifier[subpath] )):
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[d] , identifier[subpath] )
keyword[raise] identifier[FileNotFoundError]
|
def get_subpath(self, subpath: str):
"""Search a file or directory relative to the base path"""
for d in self._path:
if os.path.exists(os.path.join(d, subpath)):
return os.path.join(d, subpath) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['d']]
raise FileNotFoundError
|
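The same first-match search-path idea, distilled into a standalone sketch:

import os

def find_in_paths(paths, subpath):
    """Return the first existing join of subpath onto paths, else raise."""
    for d in paths:
        candidate = os.path.join(d, subpath)
        if os.path.exists(candidate):
            return candidate
    raise FileNotFoundError(subpath)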
def find(self, username):
"""
Find user with given username.
Args:
            username: Username of the user to search for
Raises:
ldap_tools.exceptions.NoUserFound: No users returned by LDAP
ldap_tools.exceptions.TooManyResults:
Multiple users returned by LDAP
"""
filter = ['(uid={})'.format(username)]
results = self.client.search(filter)
if len(results) < 1:
raise ldap_tools.exceptions.NoUserFound(
'User ({}) not found'.format(username))
return # pragma: no cover
elif len(results) > 1:
raise ldap_tools.exceptions.TooManyResults(
'Multiple users found. Please narrow your search.')
return # pragma: no cover
else:
return results
|
def function[find, parameter[self, username]]:
constant[
Find user with given username.
Args:
            username: Username of the user to search for
Raises:
ldap_tools.exceptions.NoUserFound: No users returned by LDAP
ldap_tools.exceptions.TooManyResults:
Multiple users returned by LDAP
]
variable[filter] assign[=] list[[<ast.Call object at 0x7da1b20a8a60>]]
variable[results] assign[=] call[name[self].client.search, parameter[name[filter]]]
if compare[call[name[len], parameter[name[results]]] less[<] constant[1]] begin[:]
<ast.Raise object at 0x7da1b20a9990>
return[None]
|
keyword[def] identifier[find] ( identifier[self] , identifier[username] ):
literal[string]
identifier[filter] =[ literal[string] . identifier[format] ( identifier[username] )]
identifier[results] = identifier[self] . identifier[client] . identifier[search] ( identifier[filter] )
keyword[if] identifier[len] ( identifier[results] )< literal[int] :
keyword[raise] identifier[ldap_tools] . identifier[exceptions] . identifier[NoUserFound] (
literal[string] . identifier[format] ( identifier[username] ))
keyword[return]
keyword[elif] identifier[len] ( identifier[results] )> literal[int] :
keyword[raise] identifier[ldap_tools] . identifier[exceptions] . identifier[TooManyResults] (
literal[string] )
keyword[return]
keyword[else] :
keyword[return] identifier[results]
|
def find(self, username):
"""
Find user with given username.
Args:
            username: Username of the user to search for
Raises:
ldap_tools.exceptions.NoUserFound: No users returned by LDAP
ldap_tools.exceptions.TooManyResults:
Multiple users returned by LDAP
"""
filter = ['(uid={})'.format(username)]
results = self.client.search(filter)
if len(results) < 1:
raise ldap_tools.exceptions.NoUserFound('User ({}) not found'.format(username))
return # pragma: no cover # depends on [control=['if'], data=[]]
elif len(results) > 1:
raise ldap_tools.exceptions.TooManyResults('Multiple users found. Please narrow your search.')
return # pragma: no cover # depends on [control=['if'], data=[]]
else:
return results
|
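On the calling side the two exceptions separate "no match" from "ambiguous match"; user_api below is a hypothetical instance wired to a real LDAP client.

try:
    results = user_api.find('jdoe')
except ldap_tools.exceptions.NoUserFound:
    print('no such user')
except ldap_tools.exceptions.TooManyResults:
    print('username matched more than one entry')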
def _prnt_min_max_val(var, text, verb):
r"""Print variable; if more than three, just min/max, unless verb > 3."""
if var.size > 3:
print(text, _strvar(var.min()), "-", _strvar(var.max()),
":", _strvar(var.size), " [min-max; #]")
if verb > 3:
print(" : ", _strvar(var))
else:
print(text, _strvar(np.atleast_1d(var)))
|
def function[_prnt_min_max_val, parameter[var, text, verb]]:
constant[Print variable; if more than three, just min/max, unless verb > 3.]
if compare[name[var].size greater[>] constant[3]] begin[:]
call[name[print], parameter[name[text], call[name[_strvar], parameter[call[name[var].min, parameter[]]]], constant[-], call[name[_strvar], parameter[call[name[var].max, parameter[]]]], constant[:], call[name[_strvar], parameter[name[var].size]], constant[ [min-max; #]]]]
if compare[name[verb] greater[>] constant[3]] begin[:]
call[name[print], parameter[constant[ : ], call[name[_strvar], parameter[name[var]]]]]
|
keyword[def] identifier[_prnt_min_max_val] ( identifier[var] , identifier[text] , identifier[verb] ):
literal[string]
keyword[if] identifier[var] . identifier[size] > literal[int] :
identifier[print] ( identifier[text] , identifier[_strvar] ( identifier[var] . identifier[min] ()), literal[string] , identifier[_strvar] ( identifier[var] . identifier[max] ()),
literal[string] , identifier[_strvar] ( identifier[var] . identifier[size] ), literal[string] )
keyword[if] identifier[verb] > literal[int] :
identifier[print] ( literal[string] , identifier[_strvar] ( identifier[var] ))
keyword[else] :
identifier[print] ( identifier[text] , identifier[_strvar] ( identifier[np] . identifier[atleast_1d] ( identifier[var] )))
|
def _prnt_min_max_val(var, text, verb):
"""Print variable; if more than three, just min/max, unless verb > 3."""
if var.size > 3:
print(text, _strvar(var.min()), '-', _strvar(var.max()), ':', _strvar(var.size), ' [min-max; #]')
if verb > 3:
print(' : ', _strvar(var)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
print(text, _strvar(np.atleast_1d(var)))
|
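A quick illustration with NumPy inputs; the exact output strings depend on the _strvar formatter defined elsewhere in the module.

import numpy as np

_prnt_min_max_val(np.array([1.0, 2.0]), 'small :', 1)       # three or fewer values: printed in full
_prnt_min_max_val(np.linspace(0.0, 9.0, 10), 'big   :', 4)  # min-max summary plus the full dump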
def ReqUserLogout(self):
"""退出接口"""
self.logined = False
time.sleep(3)
self.t.ReqUserLogout(BrokerID=self.broker, UserID=self.investor)
self.t.RegisterSpi(None)
self.t.Release()
threading.Thread(target=self.OnDisConnected, args=(self, 0)).start()
|
def function[ReqUserLogout, parameter[self]]:
    constant[Log out of the interface]
name[self].logined assign[=] constant[False]
call[name[time].sleep, parameter[constant[3]]]
call[name[self].t.ReqUserLogout, parameter[]]
call[name[self].t.RegisterSpi, parameter[constant[None]]]
call[name[self].t.Release, parameter[]]
call[call[name[threading].Thread, parameter[]].start, parameter[]]
|
keyword[def] identifier[ReqUserLogout] ( identifier[self] ):
literal[string]
identifier[self] . identifier[logined] = keyword[False]
identifier[time] . identifier[sleep] ( literal[int] )
identifier[self] . identifier[t] . identifier[ReqUserLogout] ( identifier[BrokerID] = identifier[self] . identifier[broker] , identifier[UserID] = identifier[self] . identifier[investor] )
identifier[self] . identifier[t] . identifier[RegisterSpi] ( keyword[None] )
identifier[self] . identifier[t] . identifier[Release] ()
identifier[threading] . identifier[Thread] ( identifier[target] = identifier[self] . identifier[OnDisConnected] , identifier[args] =( identifier[self] , literal[int] )). identifier[start] ()
|
def ReqUserLogout(self):
"""退出接口"""
self.logined = False
time.sleep(3)
self.t.ReqUserLogout(BrokerID=self.broker, UserID=self.investor)
self.t.RegisterSpi(None)
self.t.Release()
threading.Thread(target=self.OnDisConnected, args=(self, 0)).start()
|
def Replace(self, resource, path, type, id, initial_headers, options=None):
"""Replaces a Azure Cosmos resource and returns it.
:param dict resource:
:param str path:
:param str type:
:param str id:
:param dict initial_headers:
:param dict options:
The request options for the request.
:return:
The new Azure Cosmos resource.
:rtype:
dict
"""
if options is None:
options = {}
initial_headers = initial_headers or self.default_headers
headers = base.GetHeaders(self,
initial_headers,
'put',
path,
id,
type,
options)
# Replace will use WriteEndpoint since it uses PUT operation
request = request_object._RequestObject(type, documents._OperationType.Replace)
result, self.last_response_headers = self.__Put(path,
request,
resource,
headers)
        # update session since this request mutates data on the server side
self._UpdateSessionIfRequired(headers, result, self.last_response_headers)
return result
|
def function[Replace, parameter[self, resource, path, type, id, initial_headers, options]]:
    constant[Replaces an Azure Cosmos resource and returns it.
:param dict resource:
:param str path:
:param str type:
:param str id:
:param dict initial_headers:
:param dict options:
The request options for the request.
:return:
The new Azure Cosmos resource.
:rtype:
dict
]
if compare[name[options] is constant[None]] begin[:]
variable[options] assign[=] dictionary[[], []]
variable[initial_headers] assign[=] <ast.BoolOp object at 0x7da1b172e530>
variable[headers] assign[=] call[name[base].GetHeaders, parameter[name[self], name[initial_headers], constant[put], name[path], name[id], name[type], name[options]]]
variable[request] assign[=] call[name[request_object]._RequestObject, parameter[name[type], name[documents]._OperationType.Replace]]
<ast.Tuple object at 0x7da1b172d870> assign[=] call[name[self].__Put, parameter[name[path], name[request], name[resource], name[headers]]]
call[name[self]._UpdateSessionIfRequired, parameter[name[headers], name[result], name[self].last_response_headers]]
return[name[result]]
|
keyword[def] identifier[Replace] ( identifier[self] , identifier[resource] , identifier[path] , identifier[type] , identifier[id] , identifier[initial_headers] , identifier[options] = keyword[None] ):
literal[string]
keyword[if] identifier[options] keyword[is] keyword[None] :
identifier[options] ={}
identifier[initial_headers] = identifier[initial_headers] keyword[or] identifier[self] . identifier[default_headers]
identifier[headers] = identifier[base] . identifier[GetHeaders] ( identifier[self] ,
identifier[initial_headers] ,
literal[string] ,
identifier[path] ,
identifier[id] ,
identifier[type] ,
identifier[options] )
identifier[request] = identifier[request_object] . identifier[_RequestObject] ( identifier[type] , identifier[documents] . identifier[_OperationType] . identifier[Replace] )
identifier[result] , identifier[self] . identifier[last_response_headers] = identifier[self] . identifier[__Put] ( identifier[path] ,
identifier[request] ,
identifier[resource] ,
identifier[headers] )
identifier[self] . identifier[_UpdateSessionIfRequired] ( identifier[headers] , identifier[result] , identifier[self] . identifier[last_response_headers] )
keyword[return] identifier[result]
|
def Replace(self, resource, path, type, id, initial_headers, options=None):
"""Replaces a Azure Cosmos resource and returns it.
:param dict resource:
:param str path:
:param str type:
:param str id:
:param dict initial_headers:
:param dict options:
The request options for the request.
:return:
The new Azure Cosmos resource.
:rtype:
dict
"""
if options is None:
options = {} # depends on [control=['if'], data=['options']]
initial_headers = initial_headers or self.default_headers
headers = base.GetHeaders(self, initial_headers, 'put', path, id, type, options)
# Replace will use WriteEndpoint since it uses PUT operation
request = request_object._RequestObject(type, documents._OperationType.Replace)
(result, self.last_response_headers) = self.__Put(path, request, resource, headers)
        # update session since this request mutates data on the server side
self._UpdateSessionIfRequired(headers, result, self.last_response_headers)
return result
|
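Document-level helpers in the SDK reduce to calls of this shape; the path, ids, and the 'docs' type tag below are assumptions for illustration only.

path = '/dbs/mydb/colls/mycoll/docs/item1'    # hypothetical resource path
updated = client.Replace(new_doc, path, 'docs', 'item1', initial_headers=None, options={})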
def _create_cpe_parts(self, system, components):
"""
Create the structure to store the input type of system associated
with components of CPE Name (hardware, operating system and software).
:param string system: type of system associated with CPE Name
:param dict components: CPE Name components to store
:returns: None
        :exception: ValueError - incorrect system
"""
if system not in CPEComponent.SYSTEM_VALUES:
errmsg = "Key '{0}' is not exist".format(system)
raise ValueError(errmsg)
elements = []
elements.append(components)
pk = CPE._system_and_parts[system]
self[pk] = elements
|
def function[_create_cpe_parts, parameter[self, system, components]]:
constant[
Create the structure to store the input type of system associated
with components of CPE Name (hardware, operating system and software).
:param string system: type of system associated with CPE Name
:param dict components: CPE Name components to store
:returns: None
        :exception: ValueError - incorrect system
]
if compare[name[system] <ast.NotIn object at 0x7da2590d7190> name[CPEComponent].SYSTEM_VALUES] begin[:]
        variable[errmsg] assign[=] call[constant[Key '{0}' does not exist].format, parameter[name[system]]]
<ast.Raise object at 0x7da20c6e7c40>
variable[elements] assign[=] list[[]]
call[name[elements].append, parameter[name[components]]]
variable[pk] assign[=] call[name[CPE]._system_and_parts][name[system]]
call[name[self]][name[pk]] assign[=] name[elements]
|
keyword[def] identifier[_create_cpe_parts] ( identifier[self] , identifier[system] , identifier[components] ):
literal[string]
keyword[if] identifier[system] keyword[not] keyword[in] identifier[CPEComponent] . identifier[SYSTEM_VALUES] :
identifier[errmsg] = literal[string] . identifier[format] ( identifier[system] )
keyword[raise] identifier[ValueError] ( identifier[errmsg] )
identifier[elements] =[]
identifier[elements] . identifier[append] ( identifier[components] )
identifier[pk] = identifier[CPE] . identifier[_system_and_parts] [ identifier[system] ]
identifier[self] [ identifier[pk] ]= identifier[elements]
|
def _create_cpe_parts(self, system, components):
"""
Create the structure to store the input type of system associated
with components of CPE Name (hardware, operating system and software).
:param string system: type of system associated with CPE Name
:param dict components: CPE Name components to store
:returns: None
        :exception: ValueError - incorrect system
"""
if system not in CPEComponent.SYSTEM_VALUES:
errmsg = "Key '{0}' is not exist".format(system)
raise ValueError(errmsg) # depends on [control=['if'], data=['system']]
elements = []
elements.append(components)
pk = CPE._system_and_parts[system]
self[pk] = elements
|
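Stripped of the CPE class plumbing, the guard-then-bucket pattern looks like this; the three system keys are an assumption modelled on CPE part tags.

SYSTEM_VALUES = {'h', 'o', 'a'}   # hardware, operating system, application (assumed)

def store_parts(store, system, components):
    if system not in SYSTEM_VALUES:
        raise ValueError("Key '{0}' does not exist".format(system))
    store.setdefault(system, []).append(components)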
def _verify_docker_image_size(self, image_name):
"""Verifies size of Docker image.
Args:
image_name: name of the Docker image.
Returns:
True if image size is within the limits, False otherwise.
"""
shell_call(['docker', 'pull', image_name])
try:
image_size = subprocess.check_output(
['docker', 'inspect', '--format={{.Size}}', image_name]).strip()
image_size = int(image_size)
except (ValueError, subprocess.CalledProcessError) as e:
logging.error('Failed to determine docker image size: %s', e)
return False
logging.info('Size of docker image %s is %d', image_name, image_size)
if image_size > MAX_DOCKER_IMAGE_SIZE:
logging.error('Image size exceeds limit %d', MAX_DOCKER_IMAGE_SIZE)
return image_size <= MAX_DOCKER_IMAGE_SIZE
|
def function[_verify_docker_image_size, parameter[self, image_name]]:
constant[Verifies size of Docker image.
Args:
image_name: name of the Docker image.
Returns:
True if image size is within the limits, False otherwise.
]
call[name[shell_call], parameter[list[[<ast.Constant object at 0x7da20c794af0>, <ast.Constant object at 0x7da20c7967d0>, <ast.Name object at 0x7da20c794a00>]]]]
<ast.Try object at 0x7da20c7955a0>
call[name[logging].info, parameter[constant[Size of docker image %s is %d], name[image_name], name[image_size]]]
if compare[name[image_size] greater[>] name[MAX_DOCKER_IMAGE_SIZE]] begin[:]
call[name[logging].error, parameter[constant[Image size exceeds limit %d], name[MAX_DOCKER_IMAGE_SIZE]]]
return[compare[name[image_size] less_or_equal[<=] name[MAX_DOCKER_IMAGE_SIZE]]]
|
keyword[def] identifier[_verify_docker_image_size] ( identifier[self] , identifier[image_name] ):
literal[string]
identifier[shell_call] ([ literal[string] , literal[string] , identifier[image_name] ])
keyword[try] :
identifier[image_size] = identifier[subprocess] . identifier[check_output] (
[ literal[string] , literal[string] , literal[string] , identifier[image_name] ]). identifier[strip] ()
identifier[image_size] = identifier[int] ( identifier[image_size] )
keyword[except] ( identifier[ValueError] , identifier[subprocess] . identifier[CalledProcessError] ) keyword[as] identifier[e] :
identifier[logging] . identifier[error] ( literal[string] , identifier[e] )
keyword[return] keyword[False]
identifier[logging] . identifier[info] ( literal[string] , identifier[image_name] , identifier[image_size] )
keyword[if] identifier[image_size] > identifier[MAX_DOCKER_IMAGE_SIZE] :
identifier[logging] . identifier[error] ( literal[string] , identifier[MAX_DOCKER_IMAGE_SIZE] )
keyword[return] identifier[image_size] <= identifier[MAX_DOCKER_IMAGE_SIZE]
|
def _verify_docker_image_size(self, image_name):
"""Verifies size of Docker image.
Args:
image_name: name of the Docker image.
Returns:
True if image size is within the limits, False otherwise.
"""
shell_call(['docker', 'pull', image_name])
try:
image_size = subprocess.check_output(['docker', 'inspect', '--format={{.Size}}', image_name]).strip()
image_size = int(image_size) # depends on [control=['try'], data=[]]
except (ValueError, subprocess.CalledProcessError) as e:
logging.error('Failed to determine docker image size: %s', e)
return False # depends on [control=['except'], data=['e']]
logging.info('Size of docker image %s is %d', image_name, image_size)
if image_size > MAX_DOCKER_IMAGE_SIZE:
logging.error('Image size exceeds limit %d', MAX_DOCKER_IMAGE_SIZE) # depends on [control=['if'], data=['MAX_DOCKER_IMAGE_SIZE']]
return image_size <= MAX_DOCKER_IMAGE_SIZE
|
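The size probe itself is a single docker inspect call; the 1 GiB cap below is an assumed value for MAX_DOCKER_IMAGE_SIZE.

import subprocess

MAX_DOCKER_IMAGE_SIZE = 1024 ** 3   # assumed limit, in bytes

size = int(subprocess.check_output(
    ['docker', 'inspect', '--format={{.Size}}', 'alpine:latest']).strip())
print(size, size <= MAX_DOCKER_IMAGE_SIZE)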
def make_options_frame(self):
""" make the frame that allows for configuration and classification"""
self.tab_frame = ttk.Notebook(self.option_frame, width=800)
self.tab_configure = tk.Frame(self.tab_frame)
self.tab_classify = tk.Frame(self.tab_frame)
self.make_configure_tab()
self.make_classify_tab()
self.tab_frame.add(self.tab_configure, text="Configure")
self.tab_frame.add(self.tab_classify, text="Classify")
self.tab_frame.pack(fill=tk.BOTH, expand=True)
|
def function[make_options_frame, parameter[self]]:
constant[ make the frame that allows for configuration and classification]
name[self].tab_frame assign[=] call[name[ttk].Notebook, parameter[name[self].option_frame]]
name[self].tab_configure assign[=] call[name[tk].Frame, parameter[name[self].tab_frame]]
name[self].tab_classify assign[=] call[name[tk].Frame, parameter[name[self].tab_frame]]
call[name[self].make_configure_tab, parameter[]]
call[name[self].make_classify_tab, parameter[]]
call[name[self].tab_frame.add, parameter[name[self].tab_configure]]
call[name[self].tab_frame.add, parameter[name[self].tab_classify]]
call[name[self].tab_frame.pack, parameter[]]
|
keyword[def] identifier[make_options_frame] ( identifier[self] ):
literal[string]
identifier[self] . identifier[tab_frame] = identifier[ttk] . identifier[Notebook] ( identifier[self] . identifier[option_frame] , identifier[width] = literal[int] )
identifier[self] . identifier[tab_configure] = identifier[tk] . identifier[Frame] ( identifier[self] . identifier[tab_frame] )
identifier[self] . identifier[tab_classify] = identifier[tk] . identifier[Frame] ( identifier[self] . identifier[tab_frame] )
identifier[self] . identifier[make_configure_tab] ()
identifier[self] . identifier[make_classify_tab] ()
identifier[self] . identifier[tab_frame] . identifier[add] ( identifier[self] . identifier[tab_configure] , identifier[text] = literal[string] )
identifier[self] . identifier[tab_frame] . identifier[add] ( identifier[self] . identifier[tab_classify] , identifier[text] = literal[string] )
identifier[self] . identifier[tab_frame] . identifier[pack] ( identifier[fill] = identifier[tk] . identifier[BOTH] , identifier[expand] = keyword[True] )
|
def make_options_frame(self):
""" make the frame that allows for configuration and classification"""
self.tab_frame = ttk.Notebook(self.option_frame, width=800)
self.tab_configure = tk.Frame(self.tab_frame)
self.tab_classify = tk.Frame(self.tab_frame)
self.make_configure_tab()
self.make_classify_tab()
self.tab_frame.add(self.tab_configure, text='Configure')
self.tab_frame.add(self.tab_classify, text='Classify')
self.tab_frame.pack(fill=tk.BOTH, expand=True)
|
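A runnable reduction of the same two-tab Notebook layout, with the tab builders stubbed out:

import tkinter as tk
from tkinter import ttk

root = tk.Tk()
tabs = ttk.Notebook(root, width=800)
for label in ('Configure', 'Classify'):
    tabs.add(tk.Frame(tabs), text=label)
tabs.pack(fill=tk.BOTH, expand=True)
root.mainloop()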
def from_file(self, ifile, codec='ascii'):
"""Read textgrid from stream.
:param file ifile: Stream to read from.
:param str codec: Text encoding for the input. Note that this will be
ignored for binary TextGrids.
"""
if ifile.read(12) == b'ooBinaryFile':
def bin2str(ifile):
textlen = struct.unpack('>h', ifile.read(2))[0]
# Single byte characters
if textlen >= 0:
return ifile.read(textlen).decode('ascii')
# Multi byte characters have initial len -1 and then \xff bytes
elif textlen == -1:
textlen = struct.unpack('>h', ifile.read(2))[0]
data = ifile.read(textlen*2)
# Hack to go from number to unicode in python3 and python2
fun = unichr if 'unichr' in __builtins__ else chr
charlist = (data[i:i+2] for i in range(0, len(data), 2))
return u''.join(
fun(struct.unpack('>h', i)[0]) for i in charlist)
ifile.read(ord(ifile.read(1))) # skip oo type
self.xmin = struct.unpack('>d', ifile.read(8))[0]
self.xmax = struct.unpack('>d', ifile.read(8))[0]
ifile.read(1) # skip <exists>
self.tier_num = struct.unpack('>i', ifile.read(4))[0]
for i in range(self.tier_num):
tier_type = ifile.read(ord(ifile.read(1))).decode('ascii')
name = bin2str(ifile)
tier = Tier(0, 0, name=name, tier_type=tier_type)
self.tiers.append(tier)
tier.xmin = struct.unpack('>d', ifile.read(8))[0]
tier.xmax = struct.unpack('>d', ifile.read(8))[0]
nint = struct.unpack('>i', ifile.read(4))[0]
for i in range(nint):
x1 = struct.unpack('>d', ifile.read(8))[0]
if tier.tier_type == 'IntervalTier':
x2 = struct.unpack('>d', ifile.read(8))[0]
text = bin2str(ifile)
if tier.tier_type == 'IntervalTier':
tier.intervals.append((x1, x2, text))
elif tier.tier_type == 'TextTier':
tier.intervals.append((x1, text))
else:
                        raise Exception('Tier type does not exist.')
else:
def nn(ifile, pat):
line = next(ifile).decode(codec)
return pat.search(line).group(1)
regfloat = re.compile('([\d.]+)\s*$', flags=re.UNICODE)
regint = re.compile('([\d]+)\s*$', flags=re.UNICODE)
regstr = re.compile('"(.*)"\s*$', flags=re.UNICODE)
# Skip the Headers and empty line
next(ifile), next(ifile), next(ifile)
self.xmin = float(nn(ifile, regfloat))
self.xmax = float(nn(ifile, regfloat))
# Skip <exists>
line = next(ifile)
short = line.strip() == b'<exists>'
self.tier_num = int(nn(ifile, regint))
not short and next(ifile)
for i in range(self.tier_num):
not short and next(ifile) # skip item[]: and item[\d]:
tier_type = nn(ifile, regstr)
name = nn(ifile, regstr)
tier = Tier(0, 0, name=name, tier_type=tier_type)
self.tiers.append(tier)
tier.xmin = float(nn(ifile, regfloat))
tier.xmax = float(nn(ifile, regfloat))
for i in range(int(nn(ifile, regint))):
not short and next(ifile) # skip intervals [\d]
x1 = float(nn(ifile, regfloat))
if tier.tier_type == 'IntervalTier':
x2 = float(nn(ifile, regfloat))
t = nn(ifile, regstr)
tier.intervals.append((x1, x2, t))
elif tier.tier_type == 'TextTier':
t = nn(ifile, regstr)
tier.intervals.append((x1, t))
|
def function[from_file, parameter[self, ifile, codec]]:
constant[Read textgrid from stream.
:param file ifile: Stream to read from.
:param str codec: Text encoding for the input. Note that this will be
ignored for binary TextGrids.
]
if compare[call[name[ifile].read, parameter[constant[12]]] equal[==] constant[b'ooBinaryFile']] begin[:]
def function[bin2str, parameter[ifile]]:
variable[textlen] assign[=] call[call[name[struct].unpack, parameter[constant[>h], call[name[ifile].read, parameter[constant[2]]]]]][constant[0]]
if compare[name[textlen] greater_or_equal[>=] constant[0]] begin[:]
return[call[call[name[ifile].read, parameter[name[textlen]]].decode, parameter[constant[ascii]]]]
call[name[ifile].read, parameter[call[name[ord], parameter[call[name[ifile].read, parameter[constant[1]]]]]]]
name[self].xmin assign[=] call[call[name[struct].unpack, parameter[constant[>d], call[name[ifile].read, parameter[constant[8]]]]]][constant[0]]
name[self].xmax assign[=] call[call[name[struct].unpack, parameter[constant[>d], call[name[ifile].read, parameter[constant[8]]]]]][constant[0]]
call[name[ifile].read, parameter[constant[1]]]
name[self].tier_num assign[=] call[call[name[struct].unpack, parameter[constant[>i], call[name[ifile].read, parameter[constant[4]]]]]][constant[0]]
for taget[name[i]] in starred[call[name[range], parameter[name[self].tier_num]]] begin[:]
variable[tier_type] assign[=] call[call[name[ifile].read, parameter[call[name[ord], parameter[call[name[ifile].read, parameter[constant[1]]]]]]].decode, parameter[constant[ascii]]]
variable[name] assign[=] call[name[bin2str], parameter[name[ifile]]]
variable[tier] assign[=] call[name[Tier], parameter[constant[0], constant[0]]]
call[name[self].tiers.append, parameter[name[tier]]]
name[tier].xmin assign[=] call[call[name[struct].unpack, parameter[constant[>d], call[name[ifile].read, parameter[constant[8]]]]]][constant[0]]
name[tier].xmax assign[=] call[call[name[struct].unpack, parameter[constant[>d], call[name[ifile].read, parameter[constant[8]]]]]][constant[0]]
variable[nint] assign[=] call[call[name[struct].unpack, parameter[constant[>i], call[name[ifile].read, parameter[constant[4]]]]]][constant[0]]
for taget[name[i]] in starred[call[name[range], parameter[name[nint]]]] begin[:]
variable[x1] assign[=] call[call[name[struct].unpack, parameter[constant[>d], call[name[ifile].read, parameter[constant[8]]]]]][constant[0]]
if compare[name[tier].tier_type equal[==] constant[IntervalTier]] begin[:]
variable[x2] assign[=] call[call[name[struct].unpack, parameter[constant[>d], call[name[ifile].read, parameter[constant[8]]]]]][constant[0]]
variable[text] assign[=] call[name[bin2str], parameter[name[ifile]]]
if compare[name[tier].tier_type equal[==] constant[IntervalTier]] begin[:]
call[name[tier].intervals.append, parameter[tuple[[<ast.Name object at 0x7da1b026cac0>, <ast.Name object at 0x7da1b026fe80>, <ast.Name object at 0x7da1b026d600>]]]]
|
keyword[def] identifier[from_file] ( identifier[self] , identifier[ifile] , identifier[codec] = literal[string] ):
literal[string]
keyword[if] identifier[ifile] . identifier[read] ( literal[int] )== literal[string] :
keyword[def] identifier[bin2str] ( identifier[ifile] ):
identifier[textlen] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[ifile] . identifier[read] ( literal[int] ))[ literal[int] ]
keyword[if] identifier[textlen] >= literal[int] :
keyword[return] identifier[ifile] . identifier[read] ( identifier[textlen] ). identifier[decode] ( literal[string] )
keyword[elif] identifier[textlen] ==- literal[int] :
identifier[textlen] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[ifile] . identifier[read] ( literal[int] ))[ literal[int] ]
identifier[data] = identifier[ifile] . identifier[read] ( identifier[textlen] * literal[int] )
identifier[fun] = identifier[unichr] keyword[if] literal[string] keyword[in] identifier[__builtins__] keyword[else] identifier[chr]
identifier[charlist] =( identifier[data] [ identifier[i] : identifier[i] + literal[int] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[data] ), literal[int] ))
keyword[return] literal[string] . identifier[join] (
identifier[fun] ( identifier[struct] . identifier[unpack] ( literal[string] , identifier[i] )[ literal[int] ]) keyword[for] identifier[i] keyword[in] identifier[charlist] )
identifier[ifile] . identifier[read] ( identifier[ord] ( identifier[ifile] . identifier[read] ( literal[int] )))
identifier[self] . identifier[xmin] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[ifile] . identifier[read] ( literal[int] ))[ literal[int] ]
identifier[self] . identifier[xmax] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[ifile] . identifier[read] ( literal[int] ))[ literal[int] ]
identifier[ifile] . identifier[read] ( literal[int] )
identifier[self] . identifier[tier_num] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[ifile] . identifier[read] ( literal[int] ))[ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[tier_num] ):
identifier[tier_type] = identifier[ifile] . identifier[read] ( identifier[ord] ( identifier[ifile] . identifier[read] ( literal[int] ))). identifier[decode] ( literal[string] )
identifier[name] = identifier[bin2str] ( identifier[ifile] )
identifier[tier] = identifier[Tier] ( literal[int] , literal[int] , identifier[name] = identifier[name] , identifier[tier_type] = identifier[tier_type] )
identifier[self] . identifier[tiers] . identifier[append] ( identifier[tier] )
identifier[tier] . identifier[xmin] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[ifile] . identifier[read] ( literal[int] ))[ literal[int] ]
identifier[tier] . identifier[xmax] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[ifile] . identifier[read] ( literal[int] ))[ literal[int] ]
identifier[nint] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[ifile] . identifier[read] ( literal[int] ))[ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nint] ):
identifier[x1] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[ifile] . identifier[read] ( literal[int] ))[ literal[int] ]
keyword[if] identifier[tier] . identifier[tier_type] == literal[string] :
identifier[x2] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[ifile] . identifier[read] ( literal[int] ))[ literal[int] ]
identifier[text] = identifier[bin2str] ( identifier[ifile] )
keyword[if] identifier[tier] . identifier[tier_type] == literal[string] :
identifier[tier] . identifier[intervals] . identifier[append] (( identifier[x1] , identifier[x2] , identifier[text] ))
keyword[elif] identifier[tier] . identifier[tier_type] == literal[string] :
identifier[tier] . identifier[intervals] . identifier[append] (( identifier[x1] , identifier[text] ))
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[else] :
keyword[def] identifier[nn] ( identifier[ifile] , identifier[pat] ):
identifier[line] = identifier[next] ( identifier[ifile] ). identifier[decode] ( identifier[codec] )
keyword[return] identifier[pat] . identifier[search] ( identifier[line] ). identifier[group] ( literal[int] )
identifier[regfloat] = identifier[re] . identifier[compile] ( literal[string] , identifier[flags] = identifier[re] . identifier[UNICODE] )
identifier[regint] = identifier[re] . identifier[compile] ( literal[string] , identifier[flags] = identifier[re] . identifier[UNICODE] )
identifier[regstr] = identifier[re] . identifier[compile] ( literal[string] , identifier[flags] = identifier[re] . identifier[UNICODE] )
identifier[next] ( identifier[ifile] ), identifier[next] ( identifier[ifile] ), identifier[next] ( identifier[ifile] )
identifier[self] . identifier[xmin] = identifier[float] ( identifier[nn] ( identifier[ifile] , identifier[regfloat] ))
identifier[self] . identifier[xmax] = identifier[float] ( identifier[nn] ( identifier[ifile] , identifier[regfloat] ))
identifier[line] = identifier[next] ( identifier[ifile] )
identifier[short] = identifier[line] . identifier[strip] ()== literal[string]
identifier[self] . identifier[tier_num] = identifier[int] ( identifier[nn] ( identifier[ifile] , identifier[regint] ))
keyword[not] identifier[short] keyword[and] identifier[next] ( identifier[ifile] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[tier_num] ):
keyword[not] identifier[short] keyword[and] identifier[next] ( identifier[ifile] )
identifier[tier_type] = identifier[nn] ( identifier[ifile] , identifier[regstr] )
identifier[name] = identifier[nn] ( identifier[ifile] , identifier[regstr] )
identifier[tier] = identifier[Tier] ( literal[int] , literal[int] , identifier[name] = identifier[name] , identifier[tier_type] = identifier[tier_type] )
identifier[self] . identifier[tiers] . identifier[append] ( identifier[tier] )
identifier[tier] . identifier[xmin] = identifier[float] ( identifier[nn] ( identifier[ifile] , identifier[regfloat] ))
identifier[tier] . identifier[xmax] = identifier[float] ( identifier[nn] ( identifier[ifile] , identifier[regfloat] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[int] ( identifier[nn] ( identifier[ifile] , identifier[regint] ))):
keyword[not] identifier[short] keyword[and] identifier[next] ( identifier[ifile] )
identifier[x1] = identifier[float] ( identifier[nn] ( identifier[ifile] , identifier[regfloat] ))
keyword[if] identifier[tier] . identifier[tier_type] == literal[string] :
identifier[x2] = identifier[float] ( identifier[nn] ( identifier[ifile] , identifier[regfloat] ))
identifier[t] = identifier[nn] ( identifier[ifile] , identifier[regstr] )
identifier[tier] . identifier[intervals] . identifier[append] (( identifier[x1] , identifier[x2] , identifier[t] ))
keyword[elif] identifier[tier] . identifier[tier_type] == literal[string] :
identifier[t] = identifier[nn] ( identifier[ifile] , identifier[regstr] )
identifier[tier] . identifier[intervals] . identifier[append] (( identifier[x1] , identifier[t] ))
|
def from_file(self, ifile, codec='ascii'):
"""Read textgrid from stream.
:param file ifile: Stream to read from.
:param str codec: Text encoding for the input. Note that this will be
ignored for binary TextGrids.
"""
if ifile.read(12) == b'ooBinaryFile':
def bin2str(ifile):
textlen = struct.unpack('>h', ifile.read(2))[0]
# Single byte characters
if textlen >= 0:
return ifile.read(textlen).decode('ascii') # depends on [control=['if'], data=['textlen']]
# Multi byte characters have initial len -1 and then \xff bytes
elif textlen == -1:
textlen = struct.unpack('>h', ifile.read(2))[0]
data = ifile.read(textlen * 2)
# Hack to go from number to unicode in python3 and python2
fun = unichr if 'unichr' in __builtins__ else chr
charlist = (data[i:i + 2] for i in range(0, len(data), 2))
return u''.join((fun(struct.unpack('>h', i)[0]) for i in charlist)) # depends on [control=['if'], data=['textlen']]
ifile.read(ord(ifile.read(1))) # skip oo type
self.xmin = struct.unpack('>d', ifile.read(8))[0]
self.xmax = struct.unpack('>d', ifile.read(8))[0]
ifile.read(1) # skip <exists>
self.tier_num = struct.unpack('>i', ifile.read(4))[0]
for i in range(self.tier_num):
tier_type = ifile.read(ord(ifile.read(1))).decode('ascii')
name = bin2str(ifile)
tier = Tier(0, 0, name=name, tier_type=tier_type)
self.tiers.append(tier)
tier.xmin = struct.unpack('>d', ifile.read(8))[0]
tier.xmax = struct.unpack('>d', ifile.read(8))[0]
nint = struct.unpack('>i', ifile.read(4))[0]
for i in range(nint):
x1 = struct.unpack('>d', ifile.read(8))[0]
if tier.tier_type == 'IntervalTier':
x2 = struct.unpack('>d', ifile.read(8))[0] # depends on [control=['if'], data=[]]
text = bin2str(ifile)
if tier.tier_type == 'IntervalTier':
tier.intervals.append((x1, x2, text)) # depends on [control=['if'], data=[]]
elif tier.tier_type == 'TextTier':
tier.intervals.append((x1, text)) # depends on [control=['if'], data=[]]
else:
                        raise Exception('Tier type does not exist.') # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
else:
def nn(ifile, pat):
line = next(ifile).decode(codec)
return pat.search(line).group(1)
regfloat = re.compile('([\\d.]+)\\s*$', flags=re.UNICODE)
regint = re.compile('([\\d]+)\\s*$', flags=re.UNICODE)
regstr = re.compile('"(.*)"\\s*$', flags=re.UNICODE)
# Skip the Headers and empty line
(next(ifile), next(ifile), next(ifile))
self.xmin = float(nn(ifile, regfloat))
self.xmax = float(nn(ifile, regfloat))
# Skip <exists>
line = next(ifile)
short = line.strip() == b'<exists>'
self.tier_num = int(nn(ifile, regint))
not short and next(ifile)
for i in range(self.tier_num):
not short and next(ifile) # skip item[]: and item[\d]:
tier_type = nn(ifile, regstr)
name = nn(ifile, regstr)
tier = Tier(0, 0, name=name, tier_type=tier_type)
self.tiers.append(tier)
tier.xmin = float(nn(ifile, regfloat))
tier.xmax = float(nn(ifile, regfloat))
for i in range(int(nn(ifile, regint))):
not short and next(ifile) # skip intervals [\d]
x1 = float(nn(ifile, regfloat))
if tier.tier_type == 'IntervalTier':
x2 = float(nn(ifile, regfloat))
t = nn(ifile, regstr)
tier.intervals.append((x1, x2, t)) # depends on [control=['if'], data=[]]
elif tier.tier_type == 'TextTier':
t = nn(ifile, regstr)
tier.intervals.append((x1, t)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['i']]
|
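Both branches expect a binary stream, since the first twelve bytes decide between the binary and long-text parsers; the class name and empty constructor below are assumptions.

with open('example.TextGrid', 'rb') as fh:   # hypothetical path
    tg = TextGrid()                          # assuming the surrounding class
    tg.from_file(fh, codec='utf-8')
for tier in tg.tiers:
    print(tier.name, len(tier.intervals))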
def Nu_Yamagata(Re, Pr, Pr_pc=None, Cp_avg=None, Cp_b=None, T_b=None,
T_w=None, T_pc=None):
r'''Calculates internal convection Nusselt number for turbulent vertical
upward flow in a pipe under supercritical conditions according to [1]_.
.. math::
Nu_b = 0.0138 Re_b^{0.85}Pr_b^{0.8}F
F = \left(\frac{\bar C_p}{C_{p,b}}\right)^{n_2} \text{ if }
\frac{T_{pc}-T_b}{T_w-T_b} < 0
F = 0.67Pr_{pc}^{-0.05} \left(\frac{\bar C_p}{C_{p,b}}\right)^{n_1}
\text{ if } 0 < \frac{T_{pc}-T_b}{T_w-T_b} < 1
F = 1\text{ if } \frac{T_{pc}-T_b}{T_w-T_b} > 1
n_1 = -0.77(1 + 1/Pr_{pc}) + 1.49
n_2 = 1.44(1 + 1/Pr_{pc}) - 0.53
\bar{Cp} = \frac{H_w-H_b}{T_w-T_b}
Parameters
----------
Re : float
Reynolds number with bulk fluid properties, [-]
Pr : float
Prandtl number with bulk fluid properties, [-]
Pr_pc : float, optional
Prandtl number at the pseudocritical temperature, [-]
Cp_avg : float, optional
Average heat capacity between the wall and bulk temperatures, [J/kg/K]
Cp_b : float, optional
Heat capacity at the bulk temperature, [J/kg/K]
T_b : float
Bulk temperature, [K]
T_w : float
Wall temperature, [K]
T_pc : float
Pseudocritical temperature, i.e. temperature at P where Cp is at a
maximum, [K]
Returns
-------
Nu : float
Nusselt number with bulk fluid properties, [-]
Notes
-----
For the data used to develop the correlation, P varied from 22.6 to 29.4
MPa, and D was 7.5 and 10 mm. G varied from 310-1830 kg/m^2/s, q varied
    from 116 to 930 kW/m^2, and bulk temperature varied from 230 to 540 degrees
Celsius.
In the database in [3]_, the correlation was considered but not tested.
In [2]_, the correlation was considered but no results were reported.
For enhanced heat transfer database in [2]_, this correlation was the
second best with a MAD of 11.5%. In the database in [3]_, the correlation
was the second best as well.
If the extra information is not provided, the correlation will be used
without the corrections.
Examples
--------
References
----------
.. [1] Yamagata, K, K Nishikawa, S Hasegawa, T Fujii, and S Yoshida.
"Forced Convective Heat Transfer to Supercritical Water Flowing in
Tubes." International Journal of Heat and Mass Transfer 15, no. 12
(December 1, 1972): 2575-93. doi:10.1016/0017-9310(72)90148-2.
.. [2] Chen, Weiwei, Xiande Fang, Yu Xu, and Xianghui Su. "An Assessment of
Correlations of Forced Convection Heat Transfer to Water at
Supercritical Pressure." Annals of Nuclear Energy 76 (February 2015):
451-60. doi:10.1016/j.anucene.2014.10.027.
.. [3] Yu, Jiyang, Baoshan Jia, Dan Wu, and Daling Wang. "Optimization of
Heat Transfer Coefficient Correlation at Supercritical Pressure Using
Genetic Algorithms." Heat and Mass Transfer 45, no. 6 (January 8, 2009):
757-66. doi:10.1007/s00231-008-0475-4.
.. [4] Jäger, Wadim, Victor Hugo Sánchez Espinoza, and Antonio Hurtado.
"Review and Proposal for Heat Transfer Predictions at Supercritical
Water Conditions Using Existing Correlations and Experiments." Nuclear
Engineering and Design, (W3MDM) University of Leeds International
Symposium: What Where When? Multi-dimensional Advances for Industrial
Process Monitoring, 241, no. 6 (June 2011): 2184-2203.
doi:10.1016/j.nucengdes.2011.03.022.
'''
F = 1
if all([T_b, T_w, T_pc, Pr_pc, Cp_avg, Cp_b]):
E = (T_pc - T_b)/(T_w - T_b)
if E < 0:
n2 = 1.44*(1 + 1/Pr_pc) - 0.53
F = (Cp_avg/Cp_b)**n2
elif 0 < E < 1:
n1 = -0.77*(1 + 1/Pr_pc) + 1.49
F = 0.67*Pr_pc**-0.05*(Cp_avg/Cp_b)**n1
return 0.0138*Re**0.85*Pr**0.8*F
|
def function[Nu_Yamagata, parameter[Re, Pr, Pr_pc, Cp_avg, Cp_b, T_b, T_w, T_pc]]:
constant[Calculates internal convection Nusselt number for turbulent vertical
upward flow in a pipe under supercritical conditions according to [1]_.
.. math::
Nu_b = 0.0138 Re_b^{0.85}Pr_b^{0.8}F
F = \left(\frac{\bar C_p}{C_{p,b}}\right)^{n_2} \text{ if }
\frac{T_{pc}-T_b}{T_w-T_b} < 0
F = 0.67Pr_{pc}^{-0.05} \left(\frac{\bar C_p}{C_{p,b}}\right)^{n_1}
\text{ if } 0 < \frac{T_{pc}-T_b}{T_w-T_b} < 1
F = 1\text{ if } \frac{T_{pc}-T_b}{T_w-T_b} > 1
n_1 = -0.77(1 + 1/Pr_{pc}) + 1.49
n_2 = 1.44(1 + 1/Pr_{pc}) - 0.53
\bar{Cp} = \frac{H_w-H_b}{T_w-T_b}
Parameters
----------
Re : float
Reynolds number with bulk fluid properties, [-]
Pr : float
Prandtl number with bulk fluid properties, [-]
Pr_pc : float, optional
Prandtl number at the pseudocritical temperature, [-]
Cp_avg : float, optional
Average heat capacity between the wall and bulk temperatures, [J/kg/K]
Cp_b : float, optional
Heat capacity at the bulk temperature, [J/kg/K]
T_b : float
Bulk temperature, [K]
T_w : float
Wall temperature, [K]
T_pc : float
Pseudocritical temperature, i.e. temperature at P where Cp is at a
maximum, [K]
Returns
-------
Nu : float
Nusselt number with bulk fluid properties, [-]
Notes
-----
For the data used to develop the correlation, P varied from 22.6 to 29.4
MPa, and D was 7.5 and 10 mm. G varied from 310-1830 kg/m^2/s, q varied
    from 116 to 930 kW/m^2, and bulk temperature varied from 230 to 540 degrees
Celsius.
In the database in [3]_, the correlation was considered but not tested.
In [2]_, the correlation was considered but no results were reported.
For enhanced heat transfer database in [2]_, this correlation was the
second best with a MAD of 11.5%. In the database in [3]_, the correlation
was the second best as well.
If the extra information is not provided, the correlation will be used
without the corrections.
Examples
--------
References
----------
.. [1] Yamagata, K, K Nishikawa, S Hasegawa, T Fujii, and S Yoshida.
"Forced Convective Heat Transfer to Supercritical Water Flowing in
Tubes." International Journal of Heat and Mass Transfer 15, no. 12
(December 1, 1972): 2575-93. doi:10.1016/0017-9310(72)90148-2.
.. [2] Chen, Weiwei, Xiande Fang, Yu Xu, and Xianghui Su. "An Assessment of
Correlations of Forced Convection Heat Transfer to Water at
Supercritical Pressure." Annals of Nuclear Energy 76 (February 2015):
451-60. doi:10.1016/j.anucene.2014.10.027.
.. [3] Yu, Jiyang, Baoshan Jia, Dan Wu, and Daling Wang. "Optimization of
Heat Transfer Coefficient Correlation at Supercritical Pressure Using
Genetic Algorithms." Heat and Mass Transfer 45, no. 6 (January 8, 2009):
757-66. doi:10.1007/s00231-008-0475-4.
.. [4] Jäger, Wadim, Victor Hugo Sánchez Espinoza, and Antonio Hurtado.
"Review and Proposal for Heat Transfer Predictions at Supercritical
Water Conditions Using Existing Correlations and Experiments." Nuclear
Engineering and Design, (W3MDM) University of Leeds International
Symposium: What Where When? Multi-dimensional Advances for Industrial
Process Monitoring, 241, no. 6 (June 2011): 2184-2203.
doi:10.1016/j.nucengdes.2011.03.022.
]
variable[F] assign[=] constant[1]
if call[name[all], parameter[list[[<ast.Name object at 0x7da2041dbc70>, <ast.Name object at 0x7da2041d99c0>, <ast.Name object at 0x7da2041d9db0>, <ast.Name object at 0x7da2041d8df0>, <ast.Name object at 0x7da2041da9e0>, <ast.Name object at 0x7da2041d8d90>]]]] begin[:]
variable[E] assign[=] binary_operation[binary_operation[name[T_pc] - name[T_b]] / binary_operation[name[T_w] - name[T_b]]]
if compare[name[E] less[<] constant[0]] begin[:]
variable[n2] assign[=] binary_operation[binary_operation[constant[1.44] * binary_operation[constant[1] + binary_operation[constant[1] / name[Pr_pc]]]] - constant[0.53]]
variable[F] assign[=] binary_operation[binary_operation[name[Cp_avg] / name[Cp_b]] ** name[n2]]
return[binary_operation[binary_operation[binary_operation[constant[0.0138] * binary_operation[name[Re] ** constant[0.85]]] * binary_operation[name[Pr] ** constant[0.8]]] * name[F]]]
|
keyword[def] identifier[Nu_Yamagata] ( identifier[Re] , identifier[Pr] , identifier[Pr_pc] = keyword[None] , identifier[Cp_avg] = keyword[None] , identifier[Cp_b] = keyword[None] , identifier[T_b] = keyword[None] ,
identifier[T_w] = keyword[None] , identifier[T_pc] = keyword[None] ):
literal[string]
identifier[F] = literal[int]
keyword[if] identifier[all] ([ identifier[T_b] , identifier[T_w] , identifier[T_pc] , identifier[Pr_pc] , identifier[Cp_avg] , identifier[Cp_b] ]):
identifier[E] =( identifier[T_pc] - identifier[T_b] )/( identifier[T_w] - identifier[T_b] )
keyword[if] identifier[E] < literal[int] :
identifier[n2] = literal[int] *( literal[int] + literal[int] / identifier[Pr_pc] )- literal[int]
identifier[F] =( identifier[Cp_avg] / identifier[Cp_b] )** identifier[n2]
keyword[elif] literal[int] < identifier[E] < literal[int] :
identifier[n1] =- literal[int] *( literal[int] + literal[int] / identifier[Pr_pc] )+ literal[int]
identifier[F] = literal[int] * identifier[Pr_pc] **- literal[int] *( identifier[Cp_avg] / identifier[Cp_b] )** identifier[n1]
keyword[return] literal[int] * identifier[Re] ** literal[int] * identifier[Pr] ** literal[int] * identifier[F]
|
def Nu_Yamagata(Re, Pr, Pr_pc=None, Cp_avg=None, Cp_b=None, T_b=None, T_w=None, T_pc=None):
"""Calculates internal convection Nusselt number for turbulent vertical
upward flow in a pipe under supercritical conditions according to [1]_.
.. math::
Nu_b = 0.0138 Re_b^{0.85}Pr_b^{0.8}F
F = \\left(\\frac{\\bar C_p}{C_{p,b}}\\right)^{n_2} \\text{ if }
\\frac{T_{pc}-T_b}{T_w-T_b} < 0
F = 0.67Pr_{pc}^{-0.05} \\left(\\frac{\\bar C_p}{C_{p,b}}\\right)^{n_1}
\\text{ if } 0 < \\frac{T_{pc}-T_b}{T_w-T_b} < 1
F = 1\\text{ if } \\frac{T_{pc}-T_b}{T_w-T_b} > 1
n_1 = -0.77(1 + 1/Pr_{pc}) + 1.49
n_2 = 1.44(1 + 1/Pr_{pc}) - 0.53
\\bar{Cp} = \\frac{H_w-H_b}{T_w-T_b}
Parameters
----------
Re : float
Reynolds number with bulk fluid properties, [-]
Pr : float
Prandtl number with bulk fluid properties, [-]
Pr_pc : float, optional
Prandtl number at the pseudocritical temperature, [-]
Cp_avg : float, optional
Average heat capacity between the wall and bulk temperatures, [J/kg/K]
Cp_b : float, optional
Heat capacity at the bulk temperature, [J/kg/K]
T_b : float
Bulk temperature, [K]
T_w : float
Wall temperature, [K]
T_pc : float
Pseudocritical temperature, i.e. temperature at P where Cp is at a
maximum, [K]
Returns
-------
Nu : float
Nusselt number with bulk fluid properties, [-]
Notes
-----
For the data used to develop the correlation, P varied from 22.6 to 29.4
MPa, and D was 7.5 and 10 mm. G varied from 310-1830 kg/m^2/s, q varied
from 116 to 930 kW/m^2, and bulk temperature varied from 230 to 540 degrees
Celsius.
In the database in [3]_, the correlation was considered but not tested.
In [2]_, the correlation was considered but no results were reported.
For the enhanced heat transfer database in [2]_, this correlation was the
second best with a MAD of 11.5%. In the database in [3]_, the correlation
was the second best as well.
If the extra information is not provided, the correlation will be used
without the corrections.
Examples
--------
References
----------
.. [1] Yamagata, K, K Nishikawa, S Hasegawa, T Fujii, and S Yoshida.
"Forced Convective Heat Transfer to Supercritical Water Flowing in
Tubes." International Journal of Heat and Mass Transfer 15, no. 12
(December 1, 1972): 2575-93. doi:10.1016/0017-9310(72)90148-2.
.. [2] Chen, Weiwei, Xiande Fang, Yu Xu, and Xianghui Su. "An Assessment of
Correlations of Forced Convection Heat Transfer to Water at
Supercritical Pressure." Annals of Nuclear Energy 76 (February 2015):
451-60. doi:10.1016/j.anucene.2014.10.027.
.. [3] Yu, Jiyang, Baoshan Jia, Dan Wu, and Daling Wang. "Optimization of
Heat Transfer Coefficient Correlation at Supercritical Pressure Using
Genetic Algorithms." Heat and Mass Transfer 45, no. 6 (January 8, 2009):
757-66. doi:10.1007/s00231-008-0475-4.
.. [4] Jäger, Wadim, Victor Hugo Sánchez Espinoza, and Antonio Hurtado.
"Review and Proposal for Heat Transfer Predictions at Supercritical
Water Conditions Using Existing Correlations and Experiments." Nuclear
Engineering and Design, (W3MDM) University of Leeds International
Symposium: What Where When? Multi-dimensional Advances for Industrial
Process Monitoring, 241, no. 6 (June 2011): 2184-2203.
doi:10.1016/j.nucengdes.2011.03.022.
"""
F = 1
if all([T_b, T_w, T_pc, Pr_pc, Cp_avg, Cp_b]):
E = (T_pc - T_b) / (T_w - T_b)
if E < 0:
n2 = 1.44 * (1 + 1 / Pr_pc) - 0.53
F = (Cp_avg / Cp_b) ** n2 # depends on [control=['if'], data=[]]
elif 0 < E < 1:
n1 = -0.77 * (1 + 1 / Pr_pc) + 1.49
F = 0.67 * Pr_pc ** (-0.05) * (Cp_avg / Cp_b) ** n1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return 0.0138 * Re ** 0.85 * Pr ** 0.8 * F
|
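A minimal usage sketch for Nu_Yamagata as defined above; all property values are illustrative placeholders, not data from [1]_. Without the optional pseudocritical arguments the bare 0.0138*Re**0.85*Pr**0.8 form is returned; supplying them activates the F correction.

# Hedged example -- inputs are made up for illustration only.
Nu_plain = Nu_Yamagata(Re=1e5, Pr=1.2)
# (T_pc - T_b)/(T_w - T_b) = 0.2 here, so the 0 < E < 1 branch of F applies.
Nu_corr = Nu_Yamagata(Re=1e5, Pr=1.2, Pr_pc=2.5, Cp_avg=7000.0, Cp_b=5000.0,
                      T_b=650.0, T_w=700.0, T_pc=660.0)
print(Nu_plain, Nu_corr)
|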
def get_hpkp_pin(cls, certificate: cryptography.x509.Certificate) -> str:
"""Generate the HTTP Public Key Pinning hash (RFC 7469) for the given certificate.
"""
return b64encode(cls.get_public_key_sha256(certificate)).decode('utf-8')
|
def function[get_hpkp_pin, parameter[cls, certificate]]:
constant[Generate the HTTP Public Key Pinning hash (RFC 7469) for the given certificate.
]
return[call[call[name[b64encode], parameter[call[name[cls].get_public_key_sha256, parameter[name[certificate]]]]].decode, parameter[constant[utf-8]]]]
|
keyword[def] identifier[get_hpkp_pin] ( identifier[cls] , identifier[certificate] : identifier[cryptography] . identifier[x509] . identifier[Certificate] )-> identifier[str] :
literal[string]
keyword[return] identifier[b64encode] ( identifier[cls] . identifier[get_public_key_sha256] ( identifier[certificate] )). identifier[decode] ( literal[string] )
|
def get_hpkp_pin(cls, certificate: cryptography.x509.Certificate) -> str:
"""Generate the HTTP Public Key Pinning hash (RFC 7469) for the given certificate.
"""
return b64encode(cls.get_public_key_sha256(certificate)).decode('utf-8')
|
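A self-contained sketch of the same HPKP pin computation, assuming get_public_key_sha256 returns the SHA-256 digest of the certificate's DER-encoded SubjectPublicKeyInfo, which is what RFC 7469 pins; the certificate file name in the usage comment is a placeholder.

import hashlib
from base64 import b64encode
from cryptography import x509
from cryptography.hazmat.primitives import serialization

def hpkp_pin_sketch(certificate: x509.Certificate) -> str:
    # RFC 7469 pin: base64(sha256(DER-encoded SubjectPublicKeyInfo)).
    spki = certificate.public_key().public_bytes(
        serialization.Encoding.DER,
        serialization.PublicFormat.SubjectPublicKeyInfo,
    )
    return b64encode(hashlib.sha256(spki).digest()).decode('utf-8')

# Usage (path is a placeholder):
# cert = x509.load_pem_x509_certificate(open('server.pem', 'rb').read())
# print(hpkp_pin_sketch(cert))
|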
def detectMidpCapable(self):
"""Return detection of a MIDP mobile Java-capable device
Detects if the current device supports MIDP, a mobile Java technology.
"""
return UAgentInfo.deviceMidp in self.__userAgent \
or UAgentInfo.deviceMidp in self.__httpAccept
|
def function[detectMidpCapable, parameter[self]]:
constant[Return detection of a MIDP mobile Java-capable device
Detects if the current device supports MIDP, a mobile Java technology.
]
return[<ast.BoolOp object at 0x7da1b0b6f490>]
|
keyword[def] identifier[detectMidpCapable] ( identifier[self] ):
literal[string]
keyword[return] identifier[UAgentInfo] . identifier[deviceMidp] keyword[in] identifier[self] . identifier[__userAgent] keyword[or] identifier[UAgentInfo] . identifier[deviceMidp] keyword[in] identifier[self] . identifier[__httpAccept]
|
def detectMidpCapable(self):
"""Return detection of a MIDP mobile Java-capable device
Detects if the current device supports MIDP, a mobile Java technology.
"""
return UAgentInfo.deviceMidp in self.__userAgent or UAgentInfo.deviceMidp in self.__httpAccept
|
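A minimal sketch of the substring test detectMidpCapable performs, assuming UAgentInfo.deviceMidp holds the lowercase marker "midp" (as in the MobileESP sources) and that the stored user agent and HTTP Accept strings were lowercased when the detector was built.

class UAgentInfo:
    deviceMidp = "midp"  # assumed marker value

user_agent = "mozilla/5.0 (midp-2.0; nokia6120c/3.83)"  # made-up handset string
http_accept = "text/html"
is_midp = UAgentInfo.deviceMidp in user_agent or UAgentInfo.deviceMidp in http_accept
print(is_midp)  # True for this illustrative MIDP user agent
|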
def delete_guest_property(self, name):
"""Deletes an entry from the machine's guest property store.
in name of type str
The name of the property to delete.
raises :class:`VBoxErrorInvalidVmState`
Machine session is not open.
"""
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
self._call("deleteGuestProperty",
in_p=[name])
|
def function[delete_guest_property, parameter[self, name]]:
constant[Deletes an entry from the machine's guest property store.
in name of type str
The name of the property to delete.
raises :class:`VBoxErrorInvalidVmState`
Machine session is not open.
]
if <ast.UnaryOp object at 0x7da2044c2740> begin[:]
<ast.Raise object at 0x7da2044c0070>
call[name[self]._call, parameter[constant[deleteGuestProperty]]]
|
keyword[def] identifier[delete_guest_property] ( identifier[self] , identifier[name] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[name] , identifier[basestring] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[self] . identifier[_call] ( literal[string] ,
identifier[in_p] =[ identifier[name] ])
|
def delete_guest_property(self, name):
"""Deletes an entry from the machine's guest property store.
in name of type str
The name of the property to delete.
raises :class:`VBoxErrorInvalidVmState`
Machine session is not open.
"""
if not isinstance(name, basestring):
raise TypeError('name can only be an instance of type basestring') # depends on [control=['if'], data=[]]
self._call('deleteGuestProperty', in_p=[name])
|
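A hedged usage sketch, assuming this method is exposed on pyvbox's machine wrapper; the VM name and property path are placeholders, and a writable session on an accessible machine is required since VBoxErrorInvalidVmState is raised otherwise.

import virtualbox  # pyvbox package, assumed installed

vbox = virtualbox.VirtualBox()
machine = vbox.find_machine("ubuntu-vm")  # placeholder VM name
session = machine.create_session()
try:
    session.machine.delete_guest_property("/MyApp/DeployStamp")
finally:
    session.unlock_machine()
|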
def get_player_verify(self, tag: crtag, apikey: str, timeout=None):
"""Check the API Key of a player.
This endpoint has been **restricted** to
certain members of the community
Raises BadRequest if the apikey is invalid
Parameters
----------
tag: str
A valid player tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
apikey: str
The API Key in the player's settings
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.PLAYER + '/' + tag + '/verifytoken'
return self._get_model(url, FullPlayer, timeout=timeout, method='POST', json={'token': apikey})
|
def function[get_player_verify, parameter[self, tag, apikey, timeout]]:
constant[Check the API Key of a player.
This endpoint has been **restricted** to
certain members of the community
Raises BadRequest if the apikey is invalid
Parameters
----------
tag: str
A valid player tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
apikey: str
The API Key in the player's settings
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
]
variable[url] assign[=] binary_operation[binary_operation[binary_operation[name[self].api.PLAYER + constant[/]] + name[tag]] + constant[/verifytoken]]
return[call[name[self]._get_model, parameter[name[url], name[FullPlayer]]]]
|
keyword[def] identifier[get_player_verify] ( identifier[self] , identifier[tag] : identifier[crtag] , identifier[apikey] : identifier[str] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[url] = identifier[self] . identifier[api] . identifier[PLAYER] + literal[string] + identifier[tag] + literal[string]
keyword[return] identifier[self] . identifier[_get_model] ( identifier[url] , identifier[FullPlayer] , identifier[timeout] = identifier[timeout] , identifier[method] = literal[string] , identifier[json] ={ literal[string] : identifier[apikey] })
|
def get_player_verify(self, tag: crtag, apikey: str, timeout=None):
"""Check the API Key of a player.
This endpoint has been **restricted** to
certain members of the community
Raises BadRequest if the apikey is invalid
Parameters
----------
tag: str
A valid player tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
apikey: str
The API Key in the player's settings
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.PLAYER + '/' + tag + '/verifytoken'
return self._get_model(url, FullPlayer, timeout=timeout, method='POST', json={'token': apikey})
|
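A hedged call sketch, assuming client is an already-constructed instance of this wrapper; the tag and API key are placeholders, and the FullPlayer model is only returned when the key matches (BadRequest is raised otherwise).

# Placeholders throughout -- neither the tag nor the key is real.
player = client.get_player_verify('#2PP0PP', apikey='key-from-player-settings')
print(player.name)  # attribute access on the returned FullPlayer model
|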
def mode(series):
"""
pandas mode is "empty if nothing has 2+ occurrences."
this method always returns something:
nan if the series is empty or all-nan, breaking ties arbitrarily
"""
if series.notnull().sum() == 0:
return np.nan
else:
return series.value_counts().idxmax()
|
def function[mode, parameter[series]]:
constant[
pandas mode is "empty if nothing has 2+ occurrences."
this method always returns something:
nan if the series is empty or all-nan, breaking ties arbitrarily
]
if compare[call[call[name[series].notnull, parameter[]].sum, parameter[]] equal[==] constant[0]] begin[:]
return[name[np].nan]
|
keyword[def] identifier[mode] ( identifier[series] ):
literal[string]
keyword[if] identifier[series] . identifier[notnull] (). identifier[sum] ()== literal[int] :
keyword[return] identifier[np] . identifier[nan]
keyword[else] :
keyword[return] identifier[series] . identifier[value_counts] (). identifier[idxmax] ()
|
def mode(series):
"""
pandas mode is "empty if nothing has 2+ occurrences."
this method always returns something:
nan if the series is empty or all-nan, breaking ties arbitrarily
"""
if series.notnull().sum() == 0:
return np.nan # depends on [control=['if'], data=[]]
else:
return series.value_counts().idxmax()
|
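A runnable illustration of the two behaviours the docstring promises: a scalar result even where pandas' own Series.mode() could be empty or ambiguous, and nan for a series with no non-null values.

import numpy as np
import pandas as pd

print(mode(pd.Series([1, 2, 2, 3])))      # 2 -- clear winner
print(mode(pd.Series([1, 1, 2, 2])))      # 1 or 2 -- tie broken arbitrarily by idxmax
print(mode(pd.Series([np.nan, np.nan])))  # nan -- nothing non-null to count
|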
def wipe_partition(self, partition):
""" Deletes analysis result of partition, e.g. so a repeat
optimisation of the same partition can be done with a
different model """
for grp in partition.get_membership():
grpid = self.scorer.get_id(grp)
cache_dir = self.scorer.cache_dir
prog = self.scorer.task_interface.name
filename = os.path.join(cache_dir, '{}.{}.json'.format(grpid, prog))
if os.path.exists(filename):
os.unlink(filename)
|
def function[wipe_partition, parameter[self, partition]]:
constant[ Deletes analysis result of partition, e.g. so a repeat
optimisation of the same partition can be done with a
different model ]
for taget[name[grp]] in starred[call[name[partition].get_membership, parameter[]]] begin[:]
variable[grpid] assign[=] call[name[self].scorer.get_id, parameter[name[grp]]]
variable[cache_dir] assign[=] name[self].scorer.cache_dir
variable[prog] assign[=] name[self].scorer.task_interface.name
variable[filename] assign[=] call[name[os].path.join, parameter[name[cache_dir], call[constant[{}.{}.json].format, parameter[name[grpid], name[prog]]]]]
if call[name[os].path.exists, parameter[name[filename]]] begin[:]
call[name[os].unlink, parameter[name[filename]]]
|
keyword[def] identifier[wipe_partition] ( identifier[self] , identifier[partition] ):
literal[string]
keyword[for] identifier[grp] keyword[in] identifier[partition] . identifier[get_membership] ():
identifier[grpid] = identifier[self] . identifier[scorer] . identifier[get_id] ( identifier[grp] )
identifier[cache_dir] = identifier[self] . identifier[scorer] . identifier[cache_dir]
identifier[prog] = identifier[self] . identifier[scorer] . identifier[task_interface] . identifier[name]
identifier[filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[cache_dir] , literal[string] . identifier[format] ( identifier[grpid] , identifier[prog] ))
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[filename] ):
identifier[os] . identifier[unlink] ( identifier[filename] )
|
def wipe_partition(self, partition):
""" Deletes analysis result of partition, e.g. so a repeat
optimisation of the same partition can be done with a
different model """
for grp in partition.get_membership():
grpid = self.scorer.get_id(grp)
cache_dir = self.scorer.cache_dir
prog = self.scorer.task_interface.name
filename = os.path.join(cache_dir, '{}.{}.json'.format(grpid, prog))
if os.path.exists(filename):
os.unlink(filename) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['grp']]
|
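A hedged sketch of the cache-file convention this method clears, with stand-ins for the scorer attributes it reads; the real partition and scorer objects come from the surrounding package, so every value here is illustrative.

import os

cache_dir = "/tmp/analysis-cache"  # stands in for self.scorer.cache_dir
grpid, prog = "a1b2c3", "raxml"    # stand-ins for get_id(grp) and the task name
filename = os.path.join(cache_dir, "{}.{}.json".format(grpid, prog))
# wipe_partition unlinks one such file per group in partition.get_membership():
if os.path.exists(filename):
    os.unlink(filename)
|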
def get_id(id):
"""Get a new id if the provided one is None."""
if id == None:
id = wx.NewId()
logger.debug('Generated new ID %s.', id)
else:
logger.debug('Using provided id %s.', id)
return id
|
def function[get_id, parameter[id]]:
constant[Get a new id if the provided one is None.]
if compare[name[id] equal[==] constant[None]] begin[:]
variable[id] assign[=] call[name[wx].NewId, parameter[]]
call[name[logger].debug, parameter[constant[Generated new ID %s.], name[id]]]
return[name[id]]
|
keyword[def] identifier[get_id] ( identifier[id] ):
literal[string]
keyword[if] identifier[id] == keyword[None] :
identifier[id] = identifier[wx] . identifier[NewId] ()
identifier[logger] . identifier[debug] ( literal[string] , identifier[id] )
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[id] )
keyword[return] identifier[id]
|
def get_id(id):
"""Get a new id if the provided one is None."""
if id == None:
id = wx.NewId()
logger.debug('Generated new ID %s.', id) # depends on [control=['if'], data=['id']]
else:
logger.debug('Using provided id %s.', id)
return id
|
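A short usage sketch, assuming wxPython is installed and the module-level logger is configured; wx.NewId() is a simple ID counter and is deprecated in favour of wx.NewIdRef() in recent wxPython releases.

import wx

print(get_id(None))  # no id supplied -> fresh value from wx.NewId()
print(get_id(1042))  # caller-supplied id is passed straight through
|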
def discrete_bottleneck(inputs,
hidden_size,
z_size,
filter_size,
mode=None,
bottleneck_kind="dvq",
num_blocks=2,
num_residuals=1,
reshape_method="slice",
projection_tensors=None,
beta=0.25,
ema=True,
means=None,
ema_count=None,
ema_means=None,
epsilon=1e-5,
decay=0.999,
random_top_k=1,
soft_em=False,
num_samples=1,
softmax_k=0,
temperature_warmup_steps=150000,
do_hard_gumbel_softmax=False,
num_flows=0,
approximate_gs_entropy=False,
sum_over_latents=False,
discrete_mix=0.5,
noise_dev=1.,
startup_steps=50000,
summary=True,
name=None,
cond=True):
"""Discretization bottleneck.
Args:
inputs: Input to the bottleneck, a Tensor of shape [..., channels].
hidden_size: Dimension of the dense output.
z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
filter_size: Filter size in the embedding function.
mode: tf.estimator.ModeKeys.
bottleneck_kind: Kind of discretization bottleneck. One of dense, dvq
(decomposed vector quantization), gumbel-softmax, gumbel-softmax-dvq,
semhash, or vae.
num_blocks: Number of blocks. Used only if bottleneck_kind is DVQ.
num_residuals: Number of residual units used to compute nearest
neighbors. Used only if bottleneck_kind is DVQ.
reshape_method: Method to reshape. Used only if bottleneck_kind is DVQ.
projection_tensors: If the reshape method is project, then these are the
tensors used to project.
beta: Scale factor for codebook loss and EMA. Used only if bottleneck_kind
is DVQ.
ema: Whether to update embeddings using exponential moving averages. Used
only if bottleneck_kind is DVQ.
means: The embedding table. Used only if ema is True.
ema_count: Table of counts for each embedding corresponding to how many
examples in a batch it was the closest to. Used only if ema is True.
ema_means: Exponentially averaged version of the embeddings. Used only if
ema is True.
epsilon: Small value to avoid dividing by zero in EMA update. Used only if
ema is True.
decay: Decay factor for the exponential moving average. Used only if ema is
True.
random_top_k: Noisy top-k. Used only if bottleneck_kind is DVQ.
soft_em: Whether to use soft EM or hard EM. Used only if bottleneck_kind is
DVQ.
num_samples: Number of samples for soft EM. Used only if soft_em is True.
softmax_k: If > 0 then do top-k softmax. Used only if bottleneck_kind
is gumbel-softmax.
temperature_warmup_steps: Number of steps it takes to decay temperature to
0. Used only if bottleneck_kind is gumbel-softmax or gumbel-softmax-dvq.
do_hard_gumbel_softmax: Whether to use hard or soft Gumbel-Softmax
samples. Used only if bottleneck_kind is gumbel-softmax-dvq.
num_flows: Number of inverse autoregressive flows. Used only if
bottleneck_kind is gumbel-softmax-dvq.
approximate_gs_entropy: Whether to approximate the Gumbel-Softmax density
as a categorical distribution when calculating the sample entropy. Used
only if bottleneck_kind is gumbel-softmax-dvq.
sum_over_latents: Whether to sum over all non-batch dimensions before
taking mean of entropy loss term. Used only if bottleneck kind is DVQ
or gumbel-softmax-dvq.
discrete_mix: Factor for mixing discrete and non-discrete input. Used only
if bottleneck_kind is semhash.
noise_dev: Noise stddev. Used only if bottleneck_kind is semhash.
startup_steps: Number of steps after which latent predictor is trained. Used
only if bottleneck_kind is semhash.
summary: Whether to write summaries.
name: Name for the bottleneck scope.
cond: A tf.bool condition on whether to update the codebook.
Returns:
outputs_dense: Tensor of shape [..., output_dim]. The output dimension is
hidden_size if bottleneck_kind is gumbel-softmax, DVQ; filter_size if
bottleneck_kind is dense, semhash, vae. If bottleneck_kind is DVQ,
outputs_dense represents the codebook (means) indexed by outputs_discrete.
outputs_discrete: Tensor of shape [...]. Discrete codes, each an index in
[0, 2**z_size). It uses the hot representation if soft_em is True.
extra_loss: Scalar Tensor. Sum of codebook and commitment losses if
bottleneck_kind is DVQ; else zero.
embed_fn: Function embed with arguments partially filled in.
neg_q_entropy: Scalar Tensor representing negative entropy of variational
approximation (0 if it is deterministic).
Raises:
ValueError: If projection_tensors is None for reshape_method project, or
ema_count or ema_means is None if ema is True, or unknown args.
"""
if bottleneck_kind in ["dvq", "gumbel-softmax-dvq"]:
assert means is not None
if hidden_size % num_blocks != 0:
raise ValueError("num_blocks does not divide hidden size")
if z_size % num_residuals != 0:
raise ValueError("num_residuals does not divide embedding table size")
z_size_per_residual = int(z_size / num_residuals)
if z_size_per_residual % num_blocks != 0:
raise ValueError("num_blocks does not divide embedding table size")
block_v_size = 2**int(z_size_per_residual / num_blocks)
if ema:
if ema_count is None:
raise ValueError("ema_count is None but ema is True")
if ema_means is None:
raise ValueError("ema_means is None but ema is True")
else:
block_v_size = None
with tf.variable_scope(
name, default_name="discrete_bottleneck", reuse=tf.AUTO_REUSE):
embed_fn = partial(
embed,
hidden_size=hidden_size,
z_size=z_size,
filter_size=filter_size,
bottleneck_kind=bottleneck_kind,
soft_em=soft_em,
num_blocks=num_blocks,
num_residuals=num_residuals,
block_v_size=block_v_size,
means=means,
name=name)
if bottleneck_kind == "dense":
# Note discrete output is continuous here.
outputs_discrete = tf.layers.dense(inputs, z_size, name="vcc")
outputs_dense = tf.layers.dense(
outputs_discrete, filter_size, name="vch1")
extra_loss = tf.constant(0.0)
neg_q_entropy = tf.constant(0.0)
elif bottleneck_kind in ["dvq", "gumbel-softmax-dvq"]:
inputs_3d = inputs
if len(inputs.shape) == 4:
inputs_3d = tf.squeeze(inputs, axis=2)
if reshape_method == "slice":
x_reshaped = slice_hidden(
inputs_3d, hidden_size=hidden_size, num_blocks=num_blocks)
elif reshape_method == "project":
if projection_tensors is None:
raise ValueError(
"Projection tensors is None for reshape_method project")
x_reshaped = project_hidden(
inputs_3d,
projection_tensors=projection_tensors,
hidden_size=hidden_size,
num_blocks=num_blocks)
else:
raise ValueError("Unknown reshape_method")
x_res = tf.reshape(x_reshaped,
[-1] + common_layers.shape_list(x_reshaped)[2:])
x_means_hot = []
x_means = 0
extra_loss = 0
for i in range(num_residuals):
x_means_hot_res, x_means_res, q_loss_res, e_loss_res, neg_q_entropy = (
embedding_lookup(
x_reshaped,
means=means[i],
num_blocks=num_blocks,
block_v_size=block_v_size,
bottleneck_kind=bottleneck_kind,
random_top_k=random_top_k,
soft_em=soft_em,
num_samples=num_samples,
temperature_warmup_steps=temperature_warmup_steps,
do_hard_gumbel_softmax=do_hard_gumbel_softmax,
num_flows=num_flows,
approximate_gs_entropy=approximate_gs_entropy,
sum_over_latents=sum_over_latents))
# Update the EMA variables.
if ema:
tf.logging.info("Using EMA with beta = {}".format(beta))
updated_ema_count_res = moving_averages.assign_moving_average(
ema_count[i],
tf.where(cond,
tf.reduce_sum(
tf.reshape(x_means_hot_res,
shape=[-1, num_blocks, block_v_size]),
axis=0), ema_count[i]),
decay,
zero_debias=False)
dw = tf.matmul(
tf.transpose(x_means_hot_res, perm=[1, 2, 0]),
tf.transpose(x_res, perm=[1, 0, 2]))
updated_ema_means_res = moving_averages.assign_moving_average(
ema_means[i], tf.where(cond, dw, ema_means[i]),
decay, zero_debias=False)
n = tf.reduce_sum(updated_ema_count_res, axis=-1, keep_dims=True)
updated_ema_count_res = (
(updated_ema_count_res + epsilon) / (n + 2**z_size * epsilon) * n)
# pylint: disable=g-no-augmented-assignment
updated_ema_means_res = updated_ema_means_res / tf.expand_dims(
updated_ema_count_res, axis=-1)
# pylint: enable=g-no-augmented-assignment
with tf.control_dependencies([e_loss_res]):
update_means_res = tf.assign(means[i],
tf.where(cond,
updated_ema_means_res,
means[i]))
with tf.control_dependencies([update_means_res]):
extra_loss += beta * e_loss_res
else:
extra_loss += q_loss_res + beta * e_loss_res
# Update the residuals.
x_res -= x_means_res
x_means += x_means_res
x_means_hot.append(x_means_hot_res)
# Get the discrete latent representation.
x_means_hot = tf.stack(x_means_hot, axis=1)
x_means_idx = tf.argmax(x_means_hot, axis=-1)
# Get the binary representation.
x_means_bits = int_to_bit(
x_means_idx,
num_bits=int(z_size / (num_residuals * num_blocks)),
base=2)
shape = common_layers.shape_list(x_means_bits)
new_shape = shape[:-2]
new_shape[-1] = z_size
x_means_bits = tf.reshape(x_means_bits, shape=new_shape)
outputs_discrete = bit_to_int(
tf.to_int32(x_means_bits), num_bits=z_size, base=2)
# Adjust shape of discrete outputs.
inputs_shape = common_layers.shape_list(inputs)
outputs_discrete = tf.reshape(outputs_discrete, inputs_shape[:-1])
# If we're using soft EM then set discretes to the hot representation.
if soft_em:
outputs_discrete = x_means_hot
outputs_discrete = tf.reshape(outputs_discrete,
inputs_shape[:-1] + [block_v_size])
# Reshape assuming hidden_size == inputs_shape[:-1].
x_means = tf.reshape(x_means, inputs_shape)
outputs_dense = inputs + tf.stop_gradient(x_means - inputs)
elif bottleneck_kind == "gumbel-softmax":
_, outputs_hot, extra_loss = gumbel_softmax(
inputs,
z_size=z_size,
mode=mode,
softmax_k=softmax_k,
temperature_warmup_steps=temperature_warmup_steps,
summary=summary,
name=name)
outputs_discrete = tf.argmax(outputs_hot, axis=-1)
outputs_dense = tf.layers.dense(
outputs_hot, hidden_size, name="dae_dense")
neg_q_entropy = tf.constant(0.0)
elif bottleneck_kind == "semhash":
outputs_discrete = tf.layers.dense(inputs, z_size, name="vcc")
y_clean = common_layers.saturating_sigmoid(outputs_discrete)
if summary:
tf.summary.histogram("y_clean", tf.reshape(y_clean, [-1]))
if noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:
noise = tf.truncated_normal(
common_layers.shape_list(outputs_discrete),
mean=0.0,
stddev=noise_dev)
y = common_layers.saturating_sigmoid(outputs_discrete + noise)
else:
y = y_clean
d = tf.to_float(tf.less(0.5, y))
y_discrete = tf.stop_gradient(d) + y - tf.stop_gradient(y)
pd = common_layers.inverse_exp_decay(startup_steps * 2)
pd *= discrete_mix
pd = pd if mode == tf.estimator.ModeKeys.TRAIN else 1.0
c = tf.where(
tf.less(tf.random_uniform([common_layers.shape_list(y)[0]]), pd),
y_discrete, y)
outputs_dense_a = tf.layers.dense(c, filter_size, name="vch1a")
outputs_dense_b = tf.layers.dense(1.0 - c, filter_size, name="vch1b")
outputs_dense = outputs_dense_a + outputs_dense_b
dx = tf.to_int32(tf.stop_gradient(d))
outputs_discrete = bit_to_int(dx, z_size)
extra_loss = tf.constant(0.0)
neg_q_entropy = tf.constant(0.0)
elif bottleneck_kind == "vae":
outputs_discrete, extra_loss, _, _ = vae(inputs, z_size, name="vae")
outputs_dense = tf.layers.dense(
outputs_discrete, filter_size, name="vch1")
neg_q_entropy = tf.constant(0.0)
else:
raise ValueError("Unknown discretization method.")
return outputs_dense, outputs_discrete, extra_loss, embed_fn, neg_q_entropy
|
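discrete_bottleneck turns nearest-neighbour indices into z_size-bit codes through int_to_bit and bit_to_int; below is a hedged NumPy sketch of that round trip, using one plausible digit order (least-significant first) rather than necessarily the library's exact convention.

import numpy as np

def int_to_bit_np(x, num_bits, base=2):
    # Digits of x in the given base, least-significant digit first (assumed order).
    return np.stack([(x // base**i) % base for i in range(num_bits)], axis=-1)

def bit_to_int_np(bits, base=2):
    # Inverse of int_to_bit_np under the same digit order.
    weights = base ** np.arange(bits.shape[-1])
    return (bits * weights).sum(axis=-1)

codes = np.array([0, 5, 11, 15])
bits = int_to_bit_np(codes, num_bits=4)
assert (bit_to_int_np(bits) == codes).all()
|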
def function[discrete_bottleneck, parameter[inputs, hidden_size, z_size, filter_size, mode, bottleneck_kind, num_blocks, num_residuals, reshape_method, projection_tensors, beta, ema, means, ema_count, ema_means, epsilon, decay, random_top_k, soft_em, num_samples, softmax_k, temperature_warmup_steps, do_hard_gumbel_softmax, num_flows, approximate_gs_entropy, sum_over_latents, discrete_mix, noise_dev, startup_steps, summary, name, cond]]:
constant[Discretization bottleneck.
Args:
inputs: Input to the bottleneck, a Tensor of shape [..., channels].
hidden_size: Dimension of the dense output.
z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
filter_size: Filter size in the embedding function.
mode: tf.estimator.ModeKeys.
bottleneck_kind: Kind of discretization bottleneck. One of dense, dvq
(decomposed vector quantization), gumbel-softmax, gumbel-softmax-dvq,
semhash, or vae.
num_blocks: Number of blocks. Used only if bottleneck_kind is DVQ.
num_residuals: Number of residual units used to compute nearest
neighbors. Used only if bottleneck_kind is DVQ.
reshape_method: Method to reshape. Used only if bottleneck_kind is DVQ.
projection_tensors: If the reshape method is project, then these are the
tensors used to project.
beta: Scale factor for codebook loss and EMA. Used only if bottleneck_kind
is DVQ.
ema: Whether to update embeddings using exponential moving averages. Used
only if bottleneck_kind is DVQ.
means: The embedding table. Used only if ema is True.
ema_count: Table of counts for each embedding corresponding to how many
examples in a batch it was the closest to. Used only if ema is True.
ema_means: Exponentially averaged version of the embeddings. Used only if
ema is True.
epsilon: Small value to avoid dividing by zero in EMA update. Used only if
ema is True.
decay: Decay factor for the exponential moving average. Used only if ema is
True.
random_top_k: Noisy top-k. Used only if bottleneck_kind is DVQ.
soft_em: Whether to use soft EM or hard EM. Used only if bottleneck_kind is
DVQ.
num_samples: Number of samples for soft EM. Used only if soft_em is True.
softmax_k: If > 0 then do top-k softmax. Used only if bottleneck_kind
is gumbel-softmax.
temperature_warmup_steps: Number of steps it takes to decay temperature to
0. Used only if bottleneck_kind is gumbel-softmax or gumbel-softmax-dvq.
do_hard_gumbel_softmax: Whether to use hard or soft Gumbel-Softmax
samples. Used only if bottleneck_kind is gumbel-softmax-dvq.
num_flows: Number of inverse autoregressive flows. Used only if
bottleneck_kind is gumbel-softmax-dvq.
approximate_gs_entropy: Whether to approximate the Gumbel-Softmax density
as a categorical distribution when calculating the sample entropy. Used
only if bottleneck_kind is gumbel-softmax-dvq.
sum_over_latents: Whether to sum over all non-batch dimensions before
taking mean of entropy loss term. Used only if bottleneck kind is DVQ
or gumbel-softmax-dvq.
discrete_mix: Factor for mixing discrete and non-discrete input. Used only
if bottleneck_kind is semhash.
noise_dev: Noise stddev. Used only if bottleneck_kind is semhash.
startup_steps: Number of steps after which latent predictor is trained. Used
only if bottleneck_kind is semhash.
summary: Whether to write summaries.
name: Name for the bottleneck scope.
cond: A tf.bool condition on whether to update the codebook.
Returns:
outputs_dense: Tensor of shape [..., output_dim]. The output dimension is
hidden_size if bottleneck_kind is gumbel-softmax, DVQ; filter_size if
bottleneck_kind is dense, semhash, vae. If bottleneck_kind is DVQ,
outputs_dense represents the codebook (means) indexed by outputs_discrete.
outputs_discrete: Tensor of shape [...]. Discrete codes, each an index in
[0, 2**z_size). It uses the hot representation if soft_em is True.
extra_loss: Scalar Tensor. Sum of codebook and commitment losses if
bottleneck_kind is DVQ; else zero.
embed_fn: Function embed with arguments partially filled in.
neg_q_entropy: Scalar Tensor representing negative entropy of variational
approximation (0 if it is deterministic).
Raises:
ValueError: If projection_tensors is None for reshape_method project, or
ema_count or ema_means is None if ema is True, or unknown args.
]
if compare[name[bottleneck_kind] in list[[<ast.Constant object at 0x7da1b1f38130>, <ast.Constant object at 0x7da1b1f39330>]]] begin[:]
assert[compare[name[means] is_not constant[None]]]
if compare[binary_operation[name[hidden_size] <ast.Mod object at 0x7da2590d6920> name[num_blocks]] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da1b1f38280>
if compare[binary_operation[name[z_size] <ast.Mod object at 0x7da2590d6920> name[num_residuals]] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da1b1f38fa0>
variable[z_size_per_residual] assign[=] call[name[int], parameter[binary_operation[name[z_size] / name[num_residuals]]]]
if compare[binary_operation[name[z_size_per_residual] <ast.Mod object at 0x7da2590d6920> name[num_blocks]] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da1b1f3b370>
variable[block_v_size] assign[=] binary_operation[constant[2] ** call[name[int], parameter[binary_operation[name[z_size_per_residual] / name[num_blocks]]]]]
if name[ema] begin[:]
if compare[name[ema_count] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b1f39c30>
if compare[name[ema_means] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b1f3a500>
with call[name[tf].variable_scope, parameter[name[name]]] begin[:]
variable[embed_fn] assign[=] call[name[partial], parameter[name[embed]]]
if compare[name[bottleneck_kind] equal[==] constant[dense]] begin[:]
variable[outputs_discrete] assign[=] call[name[tf].layers.dense, parameter[name[inputs], name[z_size]]]
variable[outputs_dense] assign[=] call[name[tf].layers.dense, parameter[name[outputs_discrete], name[filter_size]]]
variable[extra_loss] assign[=] call[name[tf].constant, parameter[constant[0.0]]]
variable[neg_q_entropy] assign[=] call[name[tf].constant, parameter[constant[0.0]]]
return[tuple[[<ast.Name object at 0x7da1b20f9180>, <ast.Name object at 0x7da1b20f8370>, <ast.Name object at 0x7da1b20f8eb0>, <ast.Name object at 0x7da1b20f9f00>, <ast.Name object at 0x7da1b20f9c90>]]]
|
keyword[def] identifier[discrete_bottleneck] ( identifier[inputs] ,
identifier[hidden_size] ,
identifier[z_size] ,
identifier[filter_size] ,
identifier[mode] = keyword[None] ,
identifier[bottleneck_kind] = literal[string] ,
identifier[num_blocks] = literal[int] ,
identifier[num_residuals] = literal[int] ,
identifier[reshape_method] = literal[string] ,
identifier[projection_tensors] = keyword[None] ,
identifier[beta] = literal[int] ,
identifier[ema] = keyword[True] ,
identifier[means] = keyword[None] ,
identifier[ema_count] = keyword[None] ,
identifier[ema_means] = keyword[None] ,
identifier[epsilon] = literal[int] ,
identifier[decay] = literal[int] ,
identifier[random_top_k] = literal[int] ,
identifier[soft_em] = keyword[False] ,
identifier[num_samples] = literal[int] ,
identifier[softmax_k] = literal[int] ,
identifier[temperature_warmup_steps] = literal[int] ,
identifier[do_hard_gumbel_softmax] = keyword[False] ,
identifier[num_flows] = literal[int] ,
identifier[approximate_gs_entropy] = keyword[False] ,
identifier[sum_over_latents] = keyword[False] ,
identifier[discrete_mix] = literal[int] ,
identifier[noise_dev] = literal[int] ,
identifier[startup_steps] = literal[int] ,
identifier[summary] = keyword[True] ,
identifier[name] = keyword[None] ,
identifier[cond] = keyword[True] ):
literal[string]
keyword[if] identifier[bottleneck_kind] keyword[in] [ literal[string] , literal[string] ]:
keyword[assert] identifier[means] keyword[is] keyword[not] keyword[None]
keyword[if] identifier[hidden_size] % identifier[num_blocks] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[z_size] % identifier[num_residuals] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[z_size_per_residual] = identifier[int] ( identifier[z_size] / identifier[num_residuals] )
keyword[if] identifier[z_size_per_residual] % identifier[num_blocks] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[block_v_size] = literal[int] ** identifier[int] ( identifier[z_size_per_residual] / identifier[num_blocks] )
keyword[if] identifier[ema] :
keyword[if] identifier[ema_count] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[ema_means] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
identifier[block_v_size] = keyword[None]
keyword[with] identifier[tf] . identifier[variable_scope] (
identifier[name] , identifier[default_name] = literal[string] , identifier[reuse] = identifier[tf] . identifier[AUTO_REUSE] ):
identifier[embed_fn] = identifier[partial] (
identifier[embed] ,
identifier[hidden_size] = identifier[hidden_size] ,
identifier[z_size] = identifier[z_size] ,
identifier[filter_size] = identifier[filter_size] ,
identifier[bottleneck_kind] = identifier[bottleneck_kind] ,
identifier[soft_em] = identifier[soft_em] ,
identifier[num_blocks] = identifier[num_blocks] ,
identifier[num_residuals] = identifier[num_residuals] ,
identifier[block_v_size] = identifier[block_v_size] ,
identifier[means] = identifier[means] ,
identifier[name] = identifier[name] )
keyword[if] identifier[bottleneck_kind] == literal[string] :
identifier[outputs_discrete] = identifier[tf] . identifier[layers] . identifier[dense] ( identifier[inputs] , identifier[z_size] , identifier[name] = literal[string] )
identifier[outputs_dense] = identifier[tf] . identifier[layers] . identifier[dense] (
identifier[outputs_discrete] , identifier[filter_size] , identifier[name] = literal[string] )
identifier[extra_loss] = identifier[tf] . identifier[constant] ( literal[int] )
identifier[neg_q_entropy] = identifier[tf] . identifier[constant] ( literal[int] )
keyword[elif] identifier[bottleneck_kind] keyword[in] [ literal[string] , literal[string] ]:
identifier[inputs_3d] = identifier[inputs]
keyword[if] identifier[len] ( identifier[inputs] . identifier[shape] )== literal[int] :
identifier[inputs_3d] = identifier[tf] . identifier[squeeze] ( identifier[inputs] , identifier[axis] = literal[int] )
keyword[if] identifier[reshape_method] == literal[string] :
identifier[x_reshaped] = identifier[slice_hidden] (
identifier[inputs_3d] , identifier[hidden_size] = identifier[hidden_size] , identifier[num_blocks] = identifier[num_blocks] )
keyword[elif] identifier[reshape_method] == literal[string] :
keyword[if] identifier[projection_tensors] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] (
literal[string] )
identifier[x_reshaped] = identifier[project_hidden] (
identifier[inputs_3d] ,
identifier[projection_tensors] = identifier[projection_tensors] ,
identifier[hidden_size] = identifier[hidden_size] ,
identifier[num_blocks] = identifier[num_blocks] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[x_res] = identifier[tf] . identifier[reshape] ( identifier[x_reshaped] ,
[- literal[int] ]+ identifier[common_layers] . identifier[shape_list] ( identifier[x_reshaped] )[ literal[int] :])
identifier[x_means_hot] =[]
identifier[x_means] = literal[int]
identifier[extra_loss] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num_residuals] ):
identifier[x_means_hot_res] , identifier[x_means_res] , identifier[q_loss_res] , identifier[e_loss_res] , identifier[neg_q_entropy] =(
identifier[embedding_lookup] (
identifier[x_reshaped] ,
identifier[means] = identifier[means] [ identifier[i] ],
identifier[num_blocks] = identifier[num_blocks] ,
identifier[block_v_size] = identifier[block_v_size] ,
identifier[bottleneck_kind] = identifier[bottleneck_kind] ,
identifier[random_top_k] = identifier[random_top_k] ,
identifier[soft_em] = identifier[soft_em] ,
identifier[num_samples] = identifier[num_samples] ,
identifier[temperature_warmup_steps] = identifier[temperature_warmup_steps] ,
identifier[do_hard_gumbel_softmax] = identifier[do_hard_gumbel_softmax] ,
identifier[num_flows] = identifier[num_flows] ,
identifier[approximate_gs_entropy] = identifier[approximate_gs_entropy] ,
identifier[sum_over_latents] = identifier[sum_over_latents] ))
keyword[if] identifier[ema] :
identifier[tf] . identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[beta] ))
identifier[updated_ema_count_res] = identifier[moving_averages] . identifier[assign_moving_average] (
identifier[ema_count] [ identifier[i] ],
identifier[tf] . identifier[where] ( identifier[cond] ,
identifier[tf] . identifier[reduce_sum] (
identifier[tf] . identifier[reshape] ( identifier[x_means_hot_res] ,
identifier[shape] =[- literal[int] , identifier[num_blocks] , identifier[block_v_size] ]),
identifier[axis] = literal[int] ), identifier[ema_count] [ identifier[i] ]),
identifier[decay] ,
identifier[zero_debias] = keyword[False] )
identifier[dw] = identifier[tf] . identifier[matmul] (
identifier[tf] . identifier[transpose] ( identifier[x_means_hot_res] , identifier[perm] =[ literal[int] , literal[int] , literal[int] ]),
identifier[tf] . identifier[transpose] ( identifier[x_res] , identifier[perm] =[ literal[int] , literal[int] , literal[int] ]))
identifier[updated_ema_means_res] = identifier[moving_averages] . identifier[assign_moving_average] (
identifier[ema_means] [ identifier[i] ], identifier[tf] . identifier[where] ( identifier[cond] , identifier[dw] , identifier[ema_means] [ identifier[i] ]),
identifier[decay] , identifier[zero_debias] = keyword[False] )
identifier[n] = identifier[tf] . identifier[reduce_sum] ( identifier[updated_ema_count_res] , identifier[axis] =- literal[int] , identifier[keep_dims] = keyword[True] )
identifier[updated_ema_count_res] =(
( identifier[updated_ema_count_res] + identifier[epsilon] )/( identifier[n] + literal[int] ** identifier[z_size] * identifier[epsilon] )* identifier[n] )
identifier[updated_ema_means_res] = identifier[updated_ema_means_res] / identifier[tf] . identifier[expand_dims] (
identifier[updated_ema_count_res] , identifier[axis] =- literal[int] )
keyword[with] identifier[tf] . identifier[control_dependencies] ([ identifier[e_loss_res] ]):
identifier[update_means_res] = identifier[tf] . identifier[assign] ( identifier[means] [ identifier[i] ],
identifier[tf] . identifier[where] ( identifier[cond] ,
identifier[updated_ema_means_res] ,
identifier[means] [ identifier[i] ]))
keyword[with] identifier[tf] . identifier[control_dependencies] ([ identifier[update_means_res] ]):
identifier[extra_loss] += identifier[beta] * identifier[e_loss_res]
keyword[else] :
identifier[extra_loss] += identifier[q_loss_res] + identifier[beta] * identifier[e_loss_res]
identifier[x_res] -= identifier[x_means_res]
identifier[x_means] += identifier[x_means_res]
identifier[x_means_hot] . identifier[append] ( identifier[x_means_hot_res] )
identifier[x_means_hot] = identifier[tf] . identifier[stack] ( identifier[x_means_hot] , identifier[axis] = literal[int] )
identifier[x_means_idx] = identifier[tf] . identifier[argmax] ( identifier[x_means_hot] , identifier[axis] =- literal[int] )
identifier[x_means_bits] = identifier[int_to_bit] (
identifier[x_means_idx] ,
identifier[num_bits] = identifier[int] ( identifier[z_size] /( identifier[num_residuals] * identifier[num_blocks] )),
identifier[base] = literal[int] )
identifier[shape] = identifier[common_layers] . identifier[shape_list] ( identifier[x_means_bits] )
identifier[new_shape] = identifier[shape] [:- literal[int] ]
identifier[new_shape] [- literal[int] ]= identifier[z_size]
identifier[x_means_bits] = identifier[tf] . identifier[reshape] ( identifier[x_means_bits] , identifier[shape] = identifier[new_shape] )
identifier[outputs_discrete] = identifier[bit_to_int] (
identifier[tf] . identifier[to_int32] ( identifier[x_means_bits] ), identifier[num_bits] = identifier[z_size] , identifier[base] = literal[int] )
identifier[inputs_shape] = identifier[common_layers] . identifier[shape_list] ( identifier[inputs] )
identifier[outputs_discrete] = identifier[tf] . identifier[reshape] ( identifier[outputs_discrete] , identifier[inputs_shape] [:- literal[int] ])
keyword[if] identifier[soft_em] :
identifier[outputs_discrete] = identifier[x_means_hot]
identifier[outputs_discrete] = identifier[tf] . identifier[reshape] ( identifier[outputs_discrete] ,
identifier[inputs_shape] [:- literal[int] ]+[ identifier[block_v_size] ])
identifier[x_means] = identifier[tf] . identifier[reshape] ( identifier[x_means] , identifier[inputs_shape] )
identifier[outputs_dense] = identifier[inputs] + identifier[tf] . identifier[stop_gradient] ( identifier[x_means] - identifier[inputs] )
keyword[elif] identifier[bottleneck_kind] == literal[string] :
identifier[_] , identifier[outputs_hot] , identifier[extra_loss] = identifier[gumbel_softmax] (
identifier[inputs] ,
identifier[z_size] = identifier[z_size] ,
identifier[mode] = identifier[mode] ,
identifier[softmax_k] = identifier[softmax_k] ,
identifier[temperature_warmup_steps] = identifier[temperature_warmup_steps] ,
identifier[summary] = identifier[summary] ,
identifier[name] = identifier[name] )
identifier[outputs_discrete] = identifier[tf] . identifier[argmax] ( identifier[outputs_hot] , identifier[axis] =- literal[int] )
identifier[outputs_dense] = identifier[tf] . identifier[layers] . identifier[dense] (
identifier[outputs_hot] , identifier[hidden_size] , identifier[name] = literal[string] )
identifier[neg_q_entropy] = identifier[tf] . identifier[constant] ( literal[int] )
keyword[elif] identifier[bottleneck_kind] == literal[string] :
identifier[outputs_discrete] = identifier[tf] . identifier[layers] . identifier[dense] ( identifier[inputs] , identifier[z_size] , identifier[name] = literal[string] )
identifier[y_clean] = identifier[common_layers] . identifier[saturating_sigmoid] ( identifier[outputs_discrete] )
keyword[if] identifier[summary] :
identifier[tf] . identifier[summary] . identifier[histogram] ( literal[string] , identifier[tf] . identifier[reshape] ( identifier[y_clean] ,[- literal[int] ]))
keyword[if] identifier[noise_dev] > literal[int] keyword[and] identifier[mode] == identifier[tf] . identifier[estimator] . identifier[ModeKeys] . identifier[TRAIN] :
identifier[noise] = identifier[tf] . identifier[truncated_normal] (
identifier[common_layers] . identifier[shape_list] ( identifier[outputs_discrete] ),
identifier[mean] = literal[int] ,
identifier[stddev] = identifier[noise_dev] )
identifier[y] = identifier[common_layers] . identifier[saturating_sigmoid] ( identifier[outputs_discrete] + identifier[noise] )
keyword[else] :
identifier[y] = identifier[y_clean]
identifier[d] = identifier[tf] . identifier[to_float] ( identifier[tf] . identifier[less] ( literal[int] , identifier[y] ))
identifier[y_discrete] = identifier[tf] . identifier[stop_gradient] ( identifier[d] )+ identifier[y] - identifier[tf] . identifier[stop_gradient] ( identifier[y] )
identifier[pd] = identifier[common_layers] . identifier[inverse_exp_decay] ( identifier[startup_steps] * literal[int] )
identifier[pd] *= identifier[discrete_mix]
identifier[pd] = identifier[pd] keyword[if] identifier[mode] == identifier[tf] . identifier[estimator] . identifier[ModeKeys] . identifier[TRAIN] keyword[else] literal[int]
identifier[c] = identifier[tf] . identifier[where] (
identifier[tf] . identifier[less] ( identifier[tf] . identifier[random_uniform] ([ identifier[common_layers] . identifier[shape_list] ( identifier[y] )[ literal[int] ]]), identifier[pd] ),
identifier[y_discrete] , identifier[y] )
identifier[outputs_dense_a] = identifier[tf] . identifier[layers] . identifier[dense] ( identifier[c] , identifier[filter_size] , identifier[name] = literal[string] )
identifier[outputs_dense_b] = identifier[tf] . identifier[layers] . identifier[dense] ( literal[int] - identifier[c] , identifier[filter_size] , identifier[name] = literal[string] )
identifier[outputs_dense] = identifier[outputs_dense_a] + identifier[outputs_dense_b]
identifier[dx] = identifier[tf] . identifier[to_int32] ( identifier[tf] . identifier[stop_gradient] ( identifier[d] ))
identifier[outputs_discrete] = identifier[bit_to_int] ( identifier[dx] , identifier[z_size] )
identifier[extra_loss] = identifier[tf] . identifier[constant] ( literal[int] )
identifier[neg_q_entropy] = identifier[tf] . identifier[constant] ( literal[int] )
keyword[elif] identifier[bottleneck_kind] == literal[string] :
identifier[outputs_discrete] , identifier[extra_loss] , identifier[_] , identifier[_] = identifier[vae] ( identifier[inputs] , identifier[z_size] , identifier[name] = literal[string] )
identifier[outputs_dense] = identifier[tf] . identifier[layers] . identifier[dense] (
identifier[outputs_discrete] , identifier[filter_size] , identifier[name] = literal[string] )
identifier[neg_q_entropy] = identifier[tf] . identifier[constant] ( literal[int] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[outputs_dense] , identifier[outputs_discrete] , identifier[extra_loss] , identifier[embed_fn] , identifier[neg_q_entropy]
|
def discrete_bottleneck(inputs, hidden_size, z_size, filter_size, mode=None, bottleneck_kind='dvq', num_blocks=2, num_residuals=1, reshape_method='slice', projection_tensors=None, beta=0.25, ema=True, means=None, ema_count=None, ema_means=None, epsilon=1e-05, decay=0.999, random_top_k=1, soft_em=False, num_samples=1, softmax_k=0, temperature_warmup_steps=150000, do_hard_gumbel_softmax=False, num_flows=0, approximate_gs_entropy=False, sum_over_latents=False, discrete_mix=0.5, noise_dev=1.0, startup_steps=50000, summary=True, name=None, cond=True):
"""Discretization bottleneck.
Args:
inputs: Input to the bottleneck, a Tensor of shape [..., channels].
hidden_size: Dimension of the dense output.
z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
filter_size: Filter size in the embedding function.
mode: tf.estimator.ModeKeys.
bottleneck_kind: Kind of discretization bottleneck. One of dense, dvq
(decomposed vector quantization), gumbel-softmax, gumbel-softmax-dvq,
semhash, or vae.
num_blocks: Number of blocks. Used only if bottleneck_kind is DVQ.
num_residuals: Number of residual units used to compute nearest
neighbors. Used only if bottleneck_kind is DVQ.
reshape_method: Method to reshape. Used only if bottleneck_kind is DVQ.
projection_tensors: If the reshape method is project, then these are the
tensors used to project.
beta: Scale factor for codebook loss and EMA. Used only if bottleneck_kind
is DVQ.
ema: Whether to update embeddings using exponential moving averages. Used
only if bottleneck_kind is DVQ.
means: The embedding table. Used only if ema is True.
ema_count: Table of counts for each embedding corresponding to how many
examples in a batch it was the closest to. Used only if ema is True.
ema_means: Exponentially averaged version of the embeddings. Used only if
ema is True.
epsilon: Small value to avoid dividing by zero in EMA update. Used only if
ema is True.
decay: Decay factor for the exponential moving average. Used only if ema is
True.
random_top_k: Noisy top-k. Used only if bottleneck_kind is DVQ.
soft_em: Whether to use soft EM or hard EM. Used only if bottleneck_kind is
DVQ.
num_samples: Number of samples for soft EM. Used only if soft_em is True.
softmax_k: If > 0 then do top-k softmax. Used only if bottleneck_kind
is gumbel-softmax.
temperature_warmup_steps: Number of steps it takes to decay temperature to
0. Used only if bottleneck_kind is gumbel-softmax or gumbel-softmax-dvq.
do_hard_gumbel_softmax: Whether to use hard or soft Gumbel-Softmax
samples. Used only if bottleneck_kind is gumbel-softmax-dvq.
num_flows: Number of inverse autoregressive flows. Used only if
bottleneck_kind is gumbel-softmax-dvq.
approximate_gs_entropy: Whether to approximate the Gumbel-Softmax density
as a categorical distribution when calculating the sample entropy. Used
only if bottleneck_kind is gumbel-softmax-dvq.
sum_over_latents: Whether to sum over all non-batch dimensions before
taking mean of entropy loss term. Used only if bottleneck kind is DVQ
or gumbel-softmax-dvq.
discrete_mix: Factor for mixing discrete and non-discrete input. Used only
if bottleneck_kind is semhash.
noise_dev: Noise stddev. Used only if bottleneck_kind is semhash.
startup_steps: Number of steps after which latent predictor is trained. Used
only if bottleneck_kind is semhash.
summary: Whether to write summaries.
name: Name for the bottleneck scope.
cond: A tf.bool condition on whether to update the codebook.
Returns:
outputs_dense: Tensor of shape [..., output_dim]. The output dimension is
hidden_size if bottleneck_kind is gumbel-softmax, DVQ; filter_size if
bottleneck_kind is dense, semhash, vae. If bottleneck_kind is DVQ,
outputs_dense represents the codebook (means) indexed by outputs_discrete.
outputs_discrete: Tensor of shape [...]. Discrete codes, each an index in
[0, 2**z_size). It uses the hot representation if soft_em is True.
extra_loss: Scalar Tensor. Sum of codebook and commitment losses if
bottleneck_kind is DVQ; else zero.
embed_fn: Function embed with arguments partially filled in.
neg_q_entropy: Scalar Tensor representing negative entropy of variational
approximation (0 if it is deterministic).
Raises:
ValueError: If projection_tensors is None for reshape_method project, or
ema_count or ema_means is None if ema is True, or unknown args.
"""
if bottleneck_kind in ['dvq', 'gumbel-softmax-dvq']:
assert means is not None
if hidden_size % num_blocks != 0:
raise ValueError('num_blocks does not divide hidden size') # depends on [control=['if'], data=[]]
if z_size % num_residuals != 0:
raise ValueError('num_residuals does not divide embedding table size') # depends on [control=['if'], data=[]]
z_size_per_residual = int(z_size / num_residuals)
if z_size_per_residual % num_blocks != 0:
raise ValueError('num_blocks does not divide embedding table size') # depends on [control=['if'], data=[]]
block_v_size = 2 ** int(z_size_per_residual / num_blocks)
if ema:
if ema_count is None:
raise ValueError('ema_count is None but ema is True') # depends on [control=['if'], data=[]]
if ema_means is None:
raise ValueError('ema_means is None but ema is True') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
block_v_size = None
with tf.variable_scope(name, default_name='discrete_bottleneck', reuse=tf.AUTO_REUSE):
embed_fn = partial(embed, hidden_size=hidden_size, z_size=z_size, filter_size=filter_size, bottleneck_kind=bottleneck_kind, soft_em=soft_em, num_blocks=num_blocks, num_residuals=num_residuals, block_v_size=block_v_size, means=means, name=name)
if bottleneck_kind == 'dense':
# Note discrete output is continuous here.
outputs_discrete = tf.layers.dense(inputs, z_size, name='vcc')
outputs_dense = tf.layers.dense(outputs_discrete, filter_size, name='vch1')
extra_loss = tf.constant(0.0)
neg_q_entropy = tf.constant(0.0) # depends on [control=['if'], data=[]]
elif bottleneck_kind in ['dvq', 'gumbel-softmax-dvq']:
inputs_3d = inputs
if len(inputs.shape) == 4:
inputs_3d = tf.squeeze(inputs, axis=2) # depends on [control=['if'], data=[]]
if reshape_method == 'slice':
x_reshaped = slice_hidden(inputs_3d, hidden_size=hidden_size, num_blocks=num_blocks) # depends on [control=['if'], data=[]]
elif reshape_method == 'project':
if projection_tensors is None:
raise ValueError('Projection tensors is None for reshape_method project') # depends on [control=['if'], data=[]]
x_reshaped = project_hidden(inputs_3d, projection_tensors=projection_tensors, hidden_size=hidden_size, num_blocks=num_blocks) # depends on [control=['if'], data=[]]
else:
raise ValueError('Unknown reshape_method')
x_res = tf.reshape(x_reshaped, [-1] + common_layers.shape_list(x_reshaped)[2:])
x_means_hot = []
x_means = 0
extra_loss = 0
for i in range(num_residuals):
(x_means_hot_res, x_means_res, q_loss_res, e_loss_res, neg_q_entropy) = embedding_lookup(x_reshaped, means=means[i], num_blocks=num_blocks, block_v_size=block_v_size, bottleneck_kind=bottleneck_kind, random_top_k=random_top_k, soft_em=soft_em, num_samples=num_samples, temperature_warmup_steps=temperature_warmup_steps, do_hard_gumbel_softmax=do_hard_gumbel_softmax, num_flows=num_flows, approximate_gs_entropy=approximate_gs_entropy, sum_over_latents=sum_over_latents)
# Update the EMA variables.
if ema:
tf.logging.info('Using EMA with beta = {}'.format(beta))
updated_ema_count_res = moving_averages.assign_moving_average(ema_count[i], tf.where(cond, tf.reduce_sum(tf.reshape(x_means_hot_res, shape=[-1, num_blocks, block_v_size]), axis=0), ema_count[i]), decay, zero_debias=False)
dw = tf.matmul(tf.transpose(x_means_hot_res, perm=[1, 2, 0]), tf.transpose(x_res, perm=[1, 0, 2]))
updated_ema_means_res = moving_averages.assign_moving_average(ema_means[i], tf.where(cond, dw, ema_means[i]), decay, zero_debias=False)
n = tf.reduce_sum(updated_ema_count_res, axis=-1, keep_dims=True)
updated_ema_count_res = (updated_ema_count_res + epsilon) / (n + 2 ** z_size * epsilon) * n
# pylint: disable=g-no-augmented-assignment
updated_ema_means_res = updated_ema_means_res / tf.expand_dims(updated_ema_count_res, axis=-1)
# pylint: enable=g-no-augmented-assignment
with tf.control_dependencies([e_loss_res]):
update_means_res = tf.assign(means[i], tf.where(cond, updated_ema_means_res, means[i]))
with tf.control_dependencies([update_means_res]):
extra_loss += beta * e_loss_res # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
else:
extra_loss += q_loss_res + beta * e_loss_res
# Update the residuals.
x_res -= x_means_res
x_means += x_means_res
x_means_hot.append(x_means_hot_res) # depends on [control=['for'], data=['i']]
# Get the discrete latent representation.
x_means_hot = tf.stack(x_means_hot, axis=1)
x_means_idx = tf.argmax(x_means_hot, axis=-1)
# Get the binary representation.
x_means_bits = int_to_bit(x_means_idx, num_bits=int(z_size / (num_residuals * num_blocks)), base=2)
shape = common_layers.shape_list(x_means_bits)
new_shape = shape[:-2]
new_shape[-1] = z_size
x_means_bits = tf.reshape(x_means_bits, shape=new_shape)
outputs_discrete = bit_to_int(tf.to_int32(x_means_bits), num_bits=z_size, base=2)
# Adjust shape of discrete outputs.
inputs_shape = common_layers.shape_list(inputs)
outputs_discrete = tf.reshape(outputs_discrete, inputs_shape[:-1])
# If we're using soft EM then set discretes to the hot representation.
if soft_em:
outputs_discrete = x_means_hot
outputs_discrete = tf.reshape(outputs_discrete, inputs_shape[:-1] + [block_v_size]) # depends on [control=['if'], data=[]]
            # Reshape assuming hidden_size == inputs_shape[-1].
x_means = tf.reshape(x_means, inputs_shape)
outputs_dense = inputs + tf.stop_gradient(x_means - inputs) # depends on [control=['if'], data=['bottleneck_kind']]
elif bottleneck_kind == 'gumbel-softmax':
(_, outputs_hot, extra_loss) = gumbel_softmax(inputs, z_size=z_size, mode=mode, softmax_k=softmax_k, temperature_warmup_steps=temperature_warmup_steps, summary=summary, name=name)
outputs_discrete = tf.argmax(outputs_hot, axis=-1)
outputs_dense = tf.layers.dense(outputs_hot, hidden_size, name='dae_dense')
neg_q_entropy = tf.constant(0.0) # depends on [control=['if'], data=[]]
elif bottleneck_kind == 'semhash':
outputs_discrete = tf.layers.dense(inputs, z_size, name='vcc')
y_clean = common_layers.saturating_sigmoid(outputs_discrete)
if summary:
tf.summary.histogram('y_clean', tf.reshape(y_clean, [-1])) # depends on [control=['if'], data=[]]
if noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:
noise = tf.truncated_normal(common_layers.shape_list(outputs_discrete), mean=0.0, stddev=noise_dev)
y = common_layers.saturating_sigmoid(outputs_discrete + noise) # depends on [control=['if'], data=[]]
else:
y = y_clean
d = tf.to_float(tf.less(0.5, y))
y_discrete = tf.stop_gradient(d) + y - tf.stop_gradient(y)
pd = common_layers.inverse_exp_decay(startup_steps * 2)
pd *= discrete_mix
pd = pd if mode == tf.estimator.ModeKeys.TRAIN else 1.0
c = tf.where(tf.less(tf.random_uniform([common_layers.shape_list(y)[0]]), pd), y_discrete, y)
outputs_dense_a = tf.layers.dense(c, filter_size, name='vch1a')
outputs_dense_b = tf.layers.dense(1.0 - c, filter_size, name='vch1b')
outputs_dense = outputs_dense_a + outputs_dense_b
dx = tf.to_int32(tf.stop_gradient(d))
outputs_discrete = bit_to_int(dx, z_size)
extra_loss = tf.constant(0.0)
neg_q_entropy = tf.constant(0.0) # depends on [control=['if'], data=[]]
elif bottleneck_kind == 'vae':
(outputs_discrete, extra_loss, _, _) = vae(inputs, z_size, name='vae')
outputs_dense = tf.layers.dense(outputs_discrete, filter_size, name='vch1')
neg_q_entropy = tf.constant(0.0) # depends on [control=['if'], data=[]]
else:
raise ValueError('Unknown discretization method.') # depends on [control=['with'], data=[]]
return (outputs_dense, outputs_discrete, extra_loss, embed_fn, neg_q_entropy)
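# A minimal, hedged sketch of the straight-through estimator used in the DVQ
# branch above: the forward pass emits the quantized values, while gradients
# flow to `inputs` as if quantization were the identity. TF 1.x graph mode is
# assumed, matching the surrounding code; tf.round stands in for the real
# codebook lookup.
import tensorflow as tf

inputs = tf.random_normal([8, 16])  # hypothetical encoder activations
x_means = tf.round(inputs)          # stand-in for the nearest-codebook means
straight_through = inputs + tf.stop_gradient(x_means - inputs)
grads = tf.gradients(tf.reduce_sum(straight_through), inputs)[0]
# grads evaluates to all ones: backprop never sees the quantization step.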
|
def intersection(self, *iterables):
"""
Return a new set with elements common to the set and all *iterables*.
"""
comb = self._set.intersection(*iterables)
return self._fromset(comb, key=self._key)
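# Hedged usage sketch: this method matches sortedcontainers.SortedSet, whose
# intersection accepts any number of iterables and returns a new sorted set
# built with the receiver's key function.
from sortedcontainers import SortedSet

evens = SortedSet([0, 2, 4, 6, 8])
print(evens.intersection([4, 6, 10], range(5, 9)))  # SortedSet([6])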
|
def function[intersection, parameter[self]]:
constant[
Return a new set with elements common to the set and all *iterables*.
]
variable[comb] assign[=] call[name[self]._set.intersection, parameter[<ast.Starred object at 0x7da1b170f4c0>]]
return[call[name[self]._fromset, parameter[name[comb]]]]
|
keyword[def] identifier[intersection] ( identifier[self] ,* identifier[iterables] ):
literal[string]
identifier[comb] = identifier[self] . identifier[_set] . identifier[intersection] (* identifier[iterables] )
keyword[return] identifier[self] . identifier[_fromset] ( identifier[comb] , identifier[key] = identifier[self] . identifier[_key] )
|
def intersection(self, *iterables):
"""
Return a new set with elements common to the set and all *iterables*.
"""
comb = self._set.intersection(*iterables)
return self._fromset(comb, key=self._key)
|
def documentation(self):
r"""
Configuration documentation in reStructuredText_ syntax (a string).
The purpose of the :attr:`documentation` property is to provide
documentation on the integration of :class:`ConfigLoader` into other
projects without denormalizing the required knowledge via copy/paste.
.. _reStructuredText: https://en.wikipedia.org/wiki/ReStructuredText
"""
from humanfriendly.tables import format_rst_table
formatted_table = format_rst_table([
(directory,
self.get_main_pattern(directory).replace('*', r'\*'),
self.get_modular_pattern(directory).replace('*', r'\*'))
for directory in self.base_directories
], [
"Directory",
"Main configuration file",
"Modular configuration files",
])
return format(DOCUMENTATION_TEMPLATE, table=formatted_table).strip()
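# Hedged sketch of the helper this property leans on: format_rst_table() takes
# a list of row tuples plus column names and renders a reStructuredText grid
# table as a string. The directory and file names below are made up.
from humanfriendly.tables import format_rst_table

print(format_rst_table(
    [('/etc/app', 'app.ini', 'app.d/\\*.ini')],
    ['Directory', 'Main configuration file', 'Modular configuration files'],
))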
|
def function[documentation, parameter[self]]:
constant[
Configuration documentation in reStructuredText_ syntax (a string).
The purpose of the :attr:`documentation` property is to provide
documentation on the integration of :class:`ConfigLoader` into other
projects without denormalizing the required knowledge via copy/paste.
.. _reStructuredText: https://en.wikipedia.org/wiki/ReStructuredText
]
from relative_module[humanfriendly.tables] import module[format_rst_table]
variable[formatted_table] assign[=] call[name[format_rst_table], parameter[<ast.ListComp object at 0x7da1b27e99c0>, list[[<ast.Constant object at 0x7da1b27eb0a0>, <ast.Constant object at 0x7da1b27ea230>, <ast.Constant object at 0x7da1b27e9510>]]]]
return[call[call[name[format], parameter[name[DOCUMENTATION_TEMPLATE]]].strip, parameter[]]]
|
keyword[def] identifier[documentation] ( identifier[self] ):
literal[string]
keyword[from] identifier[humanfriendly] . identifier[tables] keyword[import] identifier[format_rst_table]
identifier[formatted_table] = identifier[format_rst_table] ([
( identifier[directory] ,
identifier[self] . identifier[get_main_pattern] ( identifier[directory] ). identifier[replace] ( literal[string] , literal[string] ),
identifier[self] . identifier[get_modular_pattern] ( identifier[directory] ). identifier[replace] ( literal[string] , literal[string] ))
keyword[for] identifier[directory] keyword[in] identifier[self] . identifier[base_directories]
],[
literal[string] ,
literal[string] ,
literal[string] ,
])
keyword[return] identifier[format] ( identifier[DOCUMENTATION_TEMPLATE] , identifier[table] = identifier[formatted_table] ). identifier[strip] ()
|
def documentation(self):
"""
Configuration documentation in reStructuredText_ syntax (a string).
The purpose of the :attr:`documentation` property is to provide
documentation on the integration of :class:`ConfigLoader` into other
projects without denormalizing the required knowledge via copy/paste.
.. _reStructuredText: https://en.wikipedia.org/wiki/ReStructuredText
"""
from humanfriendly.tables import format_rst_table
formatted_table = format_rst_table([(directory, self.get_main_pattern(directory).replace('*', '\\*'), self.get_modular_pattern(directory).replace('*', '\\*')) for directory in self.base_directories], ['Directory', 'Main configuration file', 'Modular configuration files'])
return format(DOCUMENTATION_TEMPLATE, table=formatted_table).strip()
|
def predict_cumulative_hazard(self, X, times=None):
"""
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times: iterable, optional
an iterable of increasing times to predict the cumulative hazard at. Default
is the set of all durations (observed and unobserved). Uses a linear interpolation if
points in time are not in the index.
Returns
-------
cumulative_hazard_ : DataFrame
the cumulative hazard of individuals over the timeline
"""
if self.strata:
cumulative_hazard_ = pd.DataFrame()
for stratum, stratified_X in X.groupby(self.strata):
try:
c_0 = self.baseline_cumulative_hazard_[[stratum]]
except KeyError:
raise StatError(
"""The stratum %s was not found in the original training data. For example, try
the following on the original dataset, df: `df.groupby(%s).size()`. You should find that %s is not present in the output.
"""
% (stratum, self.strata, stratum)
)
col = _get_index(stratified_X)
v = self.predict_partial_hazard(stratified_X)
cumulative_hazard_ = cumulative_hazard_.merge(
pd.DataFrame(np.dot(c_0, v.T), index=c_0.index, columns=col),
how="outer",
right_index=True,
left_index=True,
)
else:
c_0 = self.baseline_cumulative_hazard_
v = self.predict_partial_hazard(X)
col = _get_index(v)
cumulative_hazard_ = pd.DataFrame(np.dot(c_0, v.T), columns=col, index=c_0.index)
if times is not None:
# non-linear interpolations can push the survival curves above 1 and below 0.
return dataframe_interpolate_at_times(cumulative_hazard_, times)
return cumulative_hazard_
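# Hedged usage sketch, assuming this method belongs to a fitted lifelines
# CoxPHFitter (which this code matches). Column names come from the bundled
# Rossi dataset; only the time grid is invented.
from lifelines import CoxPHFitter
from lifelines.datasets import load_rossi

df = load_rossi()
cph = CoxPHFitter().fit(df, duration_col='week', event_col='arrest')
print(cph.predict_cumulative_hazard(df.head(3), times=[5, 25, 50]))
# one column per individual, one row per requested time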
|
def function[predict_cumulative_hazard, parameter[self, X, times]]:
constant[
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times: iterable, optional
an iterable of increasing times to predict the cumulative hazard at. Default
is the set of all durations (observed and unobserved). Uses a linear interpolation if
points in time are not in the index.
Returns
-------
cumulative_hazard_ : DataFrame
the cumulative hazard of individuals over the timeline
]
if name[self].strata begin[:]
variable[cumulative_hazard_] assign[=] call[name[pd].DataFrame, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20c6a9b70>, <ast.Name object at 0x7da20c6a8b50>]]] in starred[call[name[X].groupby, parameter[name[self].strata]]] begin[:]
<ast.Try object at 0x7da20c6aaef0>
variable[col] assign[=] call[name[_get_index], parameter[name[stratified_X]]]
variable[v] assign[=] call[name[self].predict_partial_hazard, parameter[name[stratified_X]]]
variable[cumulative_hazard_] assign[=] call[name[cumulative_hazard_].merge, parameter[call[name[pd].DataFrame, parameter[call[name[np].dot, parameter[name[c_0], name[v].T]]]]]]
if compare[name[times] is_not constant[None]] begin[:]
return[call[name[dataframe_interpolate_at_times], parameter[name[cumulative_hazard_], name[times]]]]
return[name[cumulative_hazard_]]
|
keyword[def] identifier[predict_cumulative_hazard] ( identifier[self] , identifier[X] , identifier[times] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[strata] :
identifier[cumulative_hazard_] = identifier[pd] . identifier[DataFrame] ()
keyword[for] identifier[stratum] , identifier[stratified_X] keyword[in] identifier[X] . identifier[groupby] ( identifier[self] . identifier[strata] ):
keyword[try] :
identifier[c_0] = identifier[self] . identifier[baseline_cumulative_hazard_] [[ identifier[stratum] ]]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[StatError] (
literal[string]
%( identifier[stratum] , identifier[self] . identifier[strata] , identifier[stratum] )
)
identifier[col] = identifier[_get_index] ( identifier[stratified_X] )
identifier[v] = identifier[self] . identifier[predict_partial_hazard] ( identifier[stratified_X] )
identifier[cumulative_hazard_] = identifier[cumulative_hazard_] . identifier[merge] (
identifier[pd] . identifier[DataFrame] ( identifier[np] . identifier[dot] ( identifier[c_0] , identifier[v] . identifier[T] ), identifier[index] = identifier[c_0] . identifier[index] , identifier[columns] = identifier[col] ),
identifier[how] = literal[string] ,
identifier[right_index] = keyword[True] ,
identifier[left_index] = keyword[True] ,
)
keyword[else] :
identifier[c_0] = identifier[self] . identifier[baseline_cumulative_hazard_]
identifier[v] = identifier[self] . identifier[predict_partial_hazard] ( identifier[X] )
identifier[col] = identifier[_get_index] ( identifier[v] )
identifier[cumulative_hazard_] = identifier[pd] . identifier[DataFrame] ( identifier[np] . identifier[dot] ( identifier[c_0] , identifier[v] . identifier[T] ), identifier[columns] = identifier[col] , identifier[index] = identifier[c_0] . identifier[index] )
keyword[if] identifier[times] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[dataframe_interpolate_at_times] ( identifier[cumulative_hazard_] , identifier[times] )
keyword[return] identifier[cumulative_hazard_]
|
def predict_cumulative_hazard(self, X, times=None):
"""
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times: iterable, optional
an iterable of increasing times to predict the cumulative hazard at. Default
is the set of all durations (observed and unobserved). Uses a linear interpolation if
points in time are not in the index.
Returns
-------
cumulative_hazard_ : DataFrame
the cumulative hazard of individuals over the timeline
"""
if self.strata:
cumulative_hazard_ = pd.DataFrame()
for (stratum, stratified_X) in X.groupby(self.strata):
try:
c_0 = self.baseline_cumulative_hazard_[[stratum]] # depends on [control=['try'], data=[]]
except KeyError:
                raise StatError('The stratum %s was not found in the original training data. For example, try\nthe following on the original dataset, df: `df.groupby(%s).size()`. You should find that %s is not present in the output.\n' % (stratum, self.strata, stratum)) # depends on [control=['except'], data=[]]
col = _get_index(stratified_X)
v = self.predict_partial_hazard(stratified_X)
cumulative_hazard_ = cumulative_hazard_.merge(pd.DataFrame(np.dot(c_0, v.T), index=c_0.index, columns=col), how='outer', right_index=True, left_index=True) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
c_0 = self.baseline_cumulative_hazard_
v = self.predict_partial_hazard(X)
col = _get_index(v)
cumulative_hazard_ = pd.DataFrame(np.dot(c_0, v.T), columns=col, index=c_0.index)
if times is not None:
# non-linear interpolations can push the survival curves above 1 and below 0.
return dataframe_interpolate_at_times(cumulative_hazard_, times) # depends on [control=['if'], data=['times']]
return cumulative_hazard_
|
def ReadFlowRequestsReadyForProcessing(self,
client_id,
flow_id,
next_needed_request=None):
"""Reads all requests for a flow that can be processed by the worker."""
request_dict = self.flow_requests.get((client_id, flow_id), {})
response_dict = self.flow_responses.get((client_id, flow_id), {})
res = {}
for request_id in sorted(request_dict):
# Ignore outdated requests.
if request_id < next_needed_request:
continue
# The request we are currently looking for is not in yet, we are done.
if request_id != next_needed_request:
break
request = request_dict[request_id]
if not request.needs_processing:
break
responses = sorted(
itervalues(response_dict.get(request_id, {})),
key=lambda response: response.response_id)
# Serialize/deserialize responses to better simulate the
# real DB behavior (where serialization/deserialization is almost
# guaranteed to be done).
# TODO(user): change mem-db implementation to do
# serialization/deserialization everywhere in a generic way.
responses = [
r.__class__.FromSerializedString(r.SerializeToString())
for r in responses
]
res[request_id] = (request, responses)
next_needed_request += 1
return res
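# Self-contained sketch of the scan above: starting at next_needed_request,
# collect a contiguous run of request ids and stop at the first gap. Plain
# ints stand in for the GRR request/response objects.
def ready_prefix(request_ids, next_needed):
    res = []
    for rid in sorted(request_ids):
        if rid < next_needed:
            continue  # outdated
        if rid != next_needed:
            break     # gap: later requests cannot be processed yet
        res.append(rid)
        next_needed += 1
    return res

print(ready_prefix({1, 2, 3, 5}, next_needed=2))  # [2, 3]; 5 waits on 4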
|
def function[ReadFlowRequestsReadyForProcessing, parameter[self, client_id, flow_id, next_needed_request]]:
constant[Reads all requests for a flow that can be processed by the worker.]
variable[request_dict] assign[=] call[name[self].flow_requests.get, parameter[tuple[[<ast.Name object at 0x7da1b1c193f0>, <ast.Name object at 0x7da1b1c1a800>]], dictionary[[], []]]]
variable[response_dict] assign[=] call[name[self].flow_responses.get, parameter[tuple[[<ast.Name object at 0x7da1b1c181c0>, <ast.Name object at 0x7da1b1c1baf0>]], dictionary[[], []]]]
variable[res] assign[=] dictionary[[], []]
for taget[name[request_id]] in starred[call[name[sorted], parameter[name[request_dict]]]] begin[:]
if compare[name[request_id] less[<] name[next_needed_request]] begin[:]
continue
if compare[name[request_id] not_equal[!=] name[next_needed_request]] begin[:]
break
variable[request] assign[=] call[name[request_dict]][name[request_id]]
if <ast.UnaryOp object at 0x7da1b1c1b310> begin[:]
break
variable[responses] assign[=] call[name[sorted], parameter[call[name[itervalues], parameter[call[name[response_dict].get, parameter[name[request_id], dictionary[[], []]]]]]]]
variable[responses] assign[=] <ast.ListComp object at 0x7da1b1c1bb50>
call[name[res]][name[request_id]] assign[=] tuple[[<ast.Name object at 0x7da1b1c1bb80>, <ast.Name object at 0x7da1b1c18340>]]
<ast.AugAssign object at 0x7da1b1c19840>
return[name[res]]
|
keyword[def] identifier[ReadFlowRequestsReadyForProcessing] ( identifier[self] ,
identifier[client_id] ,
identifier[flow_id] ,
identifier[next_needed_request] = keyword[None] ):
literal[string]
identifier[request_dict] = identifier[self] . identifier[flow_requests] . identifier[get] (( identifier[client_id] , identifier[flow_id] ),{})
identifier[response_dict] = identifier[self] . identifier[flow_responses] . identifier[get] (( identifier[client_id] , identifier[flow_id] ),{})
identifier[res] ={}
keyword[for] identifier[request_id] keyword[in] identifier[sorted] ( identifier[request_dict] ):
keyword[if] identifier[request_id] < identifier[next_needed_request] :
keyword[continue]
keyword[if] identifier[request_id] != identifier[next_needed_request] :
keyword[break]
identifier[request] = identifier[request_dict] [ identifier[request_id] ]
keyword[if] keyword[not] identifier[request] . identifier[needs_processing] :
keyword[break]
identifier[responses] = identifier[sorted] (
identifier[itervalues] ( identifier[response_dict] . identifier[get] ( identifier[request_id] ,{})),
identifier[key] = keyword[lambda] identifier[response] : identifier[response] . identifier[response_id] )
identifier[responses] =[
identifier[r] . identifier[__class__] . identifier[FromSerializedString] ( identifier[r] . identifier[SerializeToString] ())
keyword[for] identifier[r] keyword[in] identifier[responses]
]
identifier[res] [ identifier[request_id] ]=( identifier[request] , identifier[responses] )
identifier[next_needed_request] += literal[int]
keyword[return] identifier[res]
|
def ReadFlowRequestsReadyForProcessing(self, client_id, flow_id, next_needed_request=None):
"""Reads all requests for a flow that can be processed by the worker."""
request_dict = self.flow_requests.get((client_id, flow_id), {})
response_dict = self.flow_responses.get((client_id, flow_id), {})
res = {}
for request_id in sorted(request_dict):
# Ignore outdated requests.
if request_id < next_needed_request:
continue # depends on [control=['if'], data=[]]
# The request we are currently looking for is not in yet, we are done.
if request_id != next_needed_request:
break # depends on [control=['if'], data=[]]
request = request_dict[request_id]
if not request.needs_processing:
break # depends on [control=['if'], data=[]]
responses = sorted(itervalues(response_dict.get(request_id, {})), key=lambda response: response.response_id)
# Serialize/deserialize responses to better simulate the
# real DB behavior (where serialization/deserialization is almost
# guaranteed to be done).
# TODO(user): change mem-db implementation to do
# serialization/deserialization everywhere in a generic way.
responses = [r.__class__.FromSerializedString(r.SerializeToString()) for r in responses]
res[request_id] = (request, responses)
next_needed_request += 1 # depends on [control=['for'], data=['request_id']]
return res
|
def thumb(self, obj):
"""
        Generates HTML and thumbnails for the admin site.
"""
format, created = Format.objects.get_or_create(name='newman_thumb',
defaults={
'max_width': 100,
'max_height': 100,
'flexible_height': False,
'stretch': False,
'nocrop': True,
})
if created:
format.sites = Site.objects.all()
info = obj.get_formated_photo(format)
return '<a href="%(href)s"><img src="%(src)s"></a>' % {
'href': '%s/' % obj.pk,
'src': info['url']
}
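# Hedged sketch of how such a helper is typically wired into the Django admin
# via list_display; in older Django the method would also need allow_tags =
# True (or mark_safe) so the returned HTML is not escaped.
from django.contrib import admin

class PhotoAdmin(admin.ModelAdmin):
    list_display = ('title', 'thumb')  # 'thumb' resolves to the method above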
|
def function[thumb, parameter[self, obj]]:
constant[
Generates HTML and thumbnails for the admin site.
]
<ast.Tuple object at 0x7da18f09fd30> assign[=] call[name[Format].objects.get_or_create, parameter[]]
if name[created] begin[:]
name[format].sites assign[=] call[name[Site].objects.all, parameter[]]
variable[info] assign[=] call[name[obj].get_formated_photo, parameter[name[format]]]
return[binary_operation[constant[<a href="%(href)s"><img src="%(src)s"></a>] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da18f09ded0>, <ast.Constant object at 0x7da18f09f520>], [<ast.BinOp object at 0x7da18f09d330>, <ast.Subscript object at 0x7da2054a6890>]]]]
|
keyword[def] identifier[thumb] ( identifier[self] , identifier[obj] ):
literal[string]
identifier[format] , identifier[created] = identifier[Format] . identifier[objects] . identifier[get_or_create] ( identifier[name] = literal[string] ,
identifier[defaults] ={
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[True] ,
})
keyword[if] identifier[created] :
identifier[format] . identifier[sites] = identifier[Site] . identifier[objects] . identifier[all] ()
identifier[info] = identifier[obj] . identifier[get_formated_photo] ( identifier[format] )
keyword[return] literal[string] %{
literal[string] : literal[string] % identifier[obj] . identifier[pk] ,
literal[string] : identifier[info] [ literal[string] ]
}
|
def thumb(self, obj):
"""
        Generates HTML and thumbnails for the admin site.
"""
(format, created) = Format.objects.get_or_create(name='newman_thumb', defaults={'max_width': 100, 'max_height': 100, 'flexible_height': False, 'stretch': False, 'nocrop': True})
if created:
format.sites = Site.objects.all() # depends on [control=['if'], data=[]]
info = obj.get_formated_photo(format)
return '<a href="%(href)s"><img src="%(src)s"></a>' % {'href': '%s/' % obj.pk, 'src': info['url']}
|
def print_network_spec(mlmodel_spec, interface_only=False):
""" Print the network information summary.
Args:
mlmodel_spec : the mlmodel spec
interface_only : Shows only the input and output of the network
"""
inputs, outputs, layers_info = summarize_neural_network_spec(mlmodel_spec)
print('Inputs:')
for i in inputs:
name, description = i
print(' {} {}'.format(name, description))
print('Outputs:')
for o in outputs:
name, description = o
print(' {} {}'.format(name, description))
if layers_info is None:
print('\n(This MLModel is not a neural network model or does not contain any layers)')
if layers_info and not interface_only:
print('\nLayers:')
for idx, l in enumerate(layers_info):
layer_type, name, in_blobs, out_blobs, params_info = l
print('[{}] ({}) {}'.format(idx, layer_type, name))
print(' Input blobs: {}'.format(in_blobs))
print(' Output blobs: {}'.format(out_blobs))
if len(params_info) > 0:
print(' Parameters: ')
for param in params_info:
print(' {} = {}'.format(param[0], param[1]))
print('\n')
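# Hedged usage sketch: the spec normally comes from a model loaded with
# coremltools; the file name below is a placeholder.
import coremltools

mlmodel = coremltools.models.MLModel('MyNetwork.mlmodel')
print_network_spec(mlmodel.get_spec(), interface_only=True)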
|
def function[print_network_spec, parameter[mlmodel_spec, interface_only]]:
constant[ Print the network information summary.
Args:
mlmodel_spec : the mlmodel spec
interface_only : Shows only the input and output of the network
]
<ast.Tuple object at 0x7da1b1f774c0> assign[=] call[name[summarize_neural_network_spec], parameter[name[mlmodel_spec]]]
call[name[print], parameter[constant[Inputs:]]]
for taget[name[i]] in starred[name[inputs]] begin[:]
<ast.Tuple object at 0x7da1b1f74850> assign[=] name[i]
call[name[print], parameter[call[constant[ {} {}].format, parameter[name[name], name[description]]]]]
call[name[print], parameter[constant[Outputs:]]]
for taget[name[o]] in starred[name[outputs]] begin[:]
<ast.Tuple object at 0x7da1b1f75ea0> assign[=] name[o]
call[name[print], parameter[call[constant[ {} {}].format, parameter[name[name], name[description]]]]]
if compare[name[layers_info] is constant[None]] begin[:]
call[name[print], parameter[constant[
(This MLModel is not a neural network model or does not contain any layers)]]]
if <ast.BoolOp object at 0x7da1b2059210> begin[:]
call[name[print], parameter[constant[
Layers:]]]
for taget[tuple[[<ast.Name object at 0x7da1b2059390>, <ast.Name object at 0x7da1b2058460>]]] in starred[call[name[enumerate], parameter[name[layers_info]]]] begin[:]
<ast.Tuple object at 0x7da1b205ace0> assign[=] name[l]
call[name[print], parameter[call[constant[[{}] ({}) {}].format, parameter[name[idx], name[layer_type], name[name]]]]]
call[name[print], parameter[call[constant[ Input blobs: {}].format, parameter[name[in_blobs]]]]]
call[name[print], parameter[call[constant[ Output blobs: {}].format, parameter[name[out_blobs]]]]]
if compare[call[name[len], parameter[name[params_info]]] greater[>] constant[0]] begin[:]
call[name[print], parameter[constant[ Parameters: ]]]
for taget[name[param]] in starred[name[params_info]] begin[:]
call[name[print], parameter[call[constant[ {} = {}].format, parameter[call[name[param]][constant[0]], call[name[param]][constant[1]]]]]]
call[name[print], parameter[constant[
]]]
|
keyword[def] identifier[print_network_spec] ( identifier[mlmodel_spec] , identifier[interface_only] = keyword[False] ):
literal[string]
identifier[inputs] , identifier[outputs] , identifier[layers_info] = identifier[summarize_neural_network_spec] ( identifier[mlmodel_spec] )
identifier[print] ( literal[string] )
keyword[for] identifier[i] keyword[in] identifier[inputs] :
identifier[name] , identifier[description] = identifier[i]
identifier[print] ( literal[string] . identifier[format] ( identifier[name] , identifier[description] ))
identifier[print] ( literal[string] )
keyword[for] identifier[o] keyword[in] identifier[outputs] :
identifier[name] , identifier[description] = identifier[o]
identifier[print] ( literal[string] . identifier[format] ( identifier[name] , identifier[description] ))
keyword[if] identifier[layers_info] keyword[is] keyword[None] :
identifier[print] ( literal[string] )
keyword[if] identifier[layers_info] keyword[and] keyword[not] identifier[interface_only] :
identifier[print] ( literal[string] )
keyword[for] identifier[idx] , identifier[l] keyword[in] identifier[enumerate] ( identifier[layers_info] ):
identifier[layer_type] , identifier[name] , identifier[in_blobs] , identifier[out_blobs] , identifier[params_info] = identifier[l]
identifier[print] ( literal[string] . identifier[format] ( identifier[idx] , identifier[layer_type] , identifier[name] ))
identifier[print] ( literal[string] . identifier[format] ( identifier[in_blobs] ))
identifier[print] ( literal[string] . identifier[format] ( identifier[out_blobs] ))
keyword[if] identifier[len] ( identifier[params_info] )> literal[int] :
identifier[print] ( literal[string] )
keyword[for] identifier[param] keyword[in] identifier[params_info] :
identifier[print] ( literal[string] . identifier[format] ( identifier[param] [ literal[int] ], identifier[param] [ literal[int] ]))
identifier[print] ( literal[string] )
|
def print_network_spec(mlmodel_spec, interface_only=False):
""" Print the network information summary.
Args:
mlmodel_spec : the mlmodel spec
interface_only : Shows only the input and output of the network
"""
(inputs, outputs, layers_info) = summarize_neural_network_spec(mlmodel_spec)
print('Inputs:')
for i in inputs:
(name, description) = i
print(' {} {}'.format(name, description)) # depends on [control=['for'], data=['i']]
print('Outputs:')
for o in outputs:
(name, description) = o
print(' {} {}'.format(name, description)) # depends on [control=['for'], data=['o']]
if layers_info is None:
print('\n(This MLModel is not a neural network model or does not contain any layers)') # depends on [control=['if'], data=[]]
if layers_info and (not interface_only):
print('\nLayers:')
for (idx, l) in enumerate(layers_info):
(layer_type, name, in_blobs, out_blobs, params_info) = l
print('[{}] ({}) {}'.format(idx, layer_type, name))
print(' Input blobs: {}'.format(in_blobs))
print(' Output blobs: {}'.format(out_blobs))
if len(params_info) > 0:
print(' Parameters: ') # depends on [control=['if'], data=[]]
for param in params_info:
print(' {} = {}'.format(param[0], param[1])) # depends on [control=['for'], data=['param']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
print('\n')
|
def hyperprior(self):
"""Combined hyperprior for the kernel, noise kernel and (if present) mean function.
"""
hp = self.k.hyperprior * self.noise_k.hyperprior
if self.mu is not None:
hp *= self.mu.hyperprior
return hp
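# Sketch of the composition rule assumed above: hyperprior objects multiply
# by stacking independent parameter blocks, so the joint log-density is a sum
# of component terms. ProductPrior is illustrative, not the library API.
class ProductPrior:
    def __init__(self, *parts):
        self.parts = parts

    def __mul__(self, other):
        return ProductPrior(*self.parts, other)

    def logpdf(self, theta_blocks):
        # Independence assumption: log p(theta) = sum of component log-densities.
        return sum(p.logpdf(t) for p, t in zip(self.parts, theta_blocks))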
|
def function[hyperprior, parameter[self]]:
constant[Combined hyperprior for the kernel, noise kernel and (if present) mean function.
]
variable[hp] assign[=] binary_operation[name[self].k.hyperprior * name[self].noise_k.hyperprior]
if compare[name[self].mu is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da18fe92770>
return[name[hp]]
|
keyword[def] identifier[hyperprior] ( identifier[self] ):
literal[string]
identifier[hp] = identifier[self] . identifier[k] . identifier[hyperprior] * identifier[self] . identifier[noise_k] . identifier[hyperprior]
keyword[if] identifier[self] . identifier[mu] keyword[is] keyword[not] keyword[None] :
identifier[hp] *= identifier[self] . identifier[mu] . identifier[hyperprior]
keyword[return] identifier[hp]
|
def hyperprior(self):
"""Combined hyperprior for the kernel, noise kernel and (if present) mean function.
"""
hp = self.k.hyperprior * self.noise_k.hyperprior
if self.mu is not None:
hp *= self.mu.hyperprior # depends on [control=['if'], data=[]]
return hp
|
def remove_all_gap_columns( self ):
"""
Remove any columns containing only gaps from alignment components,
text of components is modified IN PLACE.
"""
seqs = []
for c in self.components:
try:
seqs.append( list( c.text ) )
except TypeError:
seqs.append( None )
i = 0
text_size = self.text_size
while i < text_size:
all_gap = True
for seq in seqs:
if seq is None: continue
if seq[i] != '-': all_gap = False
if all_gap:
for seq in seqs:
if seq is None: continue
del seq[i]
text_size -= 1
else:
i += 1
for i in range( len( self.components ) ):
if seqs[i] is None: continue
self.components[i].text = ''.join( seqs[i] )
self.text_size = text_size
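# Self-contained illustration of the column filter above: a column is dropped
# only when every non-None sequence carries a gap there.
seqs = [list('A-C-'), list('G-T-'), None]
keep = [i for i in range(4)
        if any(s[i] != '-' for s in seqs if s is not None)]
print([''.join(s[i] for i in keep) for s in seqs if s is not None])
# ['AC', 'GT'] -- both all-gap columns are removed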
|
def function[remove_all_gap_columns, parameter[self]]:
constant[
Remove any columns containing only gaps from alignment components,
text of components is modified IN PLACE.
]
variable[seqs] assign[=] list[[]]
for taget[name[c]] in starred[name[self].components] begin[:]
<ast.Try object at 0x7da1b0d42740>
variable[i] assign[=] constant[0]
variable[text_size] assign[=] name[self].text_size
while compare[name[i] less[<] name[text_size]] begin[:]
variable[all_gap] assign[=] constant[True]
for taget[name[seq]] in starred[name[seqs]] begin[:]
if compare[name[seq] is constant[None]] begin[:]
continue
if compare[call[name[seq]][name[i]] not_equal[!=] constant[-]] begin[:]
variable[all_gap] assign[=] constant[False]
if name[all_gap] begin[:]
for taget[name[seq]] in starred[name[seqs]] begin[:]
if compare[name[seq] is constant[None]] begin[:]
continue
<ast.Delete object at 0x7da1b0d51b10>
<ast.AugAssign object at 0x7da1b0d51d20>
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].components]]]]] begin[:]
if compare[call[name[seqs]][name[i]] is constant[None]] begin[:]
continue
call[name[self].components][name[i]].text assign[=] call[constant[].join, parameter[call[name[seqs]][name[i]]]]
name[self].text_size assign[=] name[text_size]
|
keyword[def] identifier[remove_all_gap_columns] ( identifier[self] ):
literal[string]
identifier[seqs] =[]
keyword[for] identifier[c] keyword[in] identifier[self] . identifier[components] :
keyword[try] :
identifier[seqs] . identifier[append] ( identifier[list] ( identifier[c] . identifier[text] ))
keyword[except] identifier[TypeError] :
identifier[seqs] . identifier[append] ( keyword[None] )
identifier[i] = literal[int]
identifier[text_size] = identifier[self] . identifier[text_size]
keyword[while] identifier[i] < identifier[text_size] :
identifier[all_gap] = keyword[True]
keyword[for] identifier[seq] keyword[in] identifier[seqs] :
keyword[if] identifier[seq] keyword[is] keyword[None] : keyword[continue]
keyword[if] identifier[seq] [ identifier[i] ]!= literal[string] : identifier[all_gap] = keyword[False]
keyword[if] identifier[all_gap] :
keyword[for] identifier[seq] keyword[in] identifier[seqs] :
keyword[if] identifier[seq] keyword[is] keyword[None] : keyword[continue]
keyword[del] identifier[seq] [ identifier[i] ]
identifier[text_size] -= literal[int]
keyword[else] :
identifier[i] += literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[components] )):
keyword[if] identifier[seqs] [ identifier[i] ] keyword[is] keyword[None] : keyword[continue]
identifier[self] . identifier[components] [ identifier[i] ]. identifier[text] = literal[string] . identifier[join] ( identifier[seqs] [ identifier[i] ])
identifier[self] . identifier[text_size] = identifier[text_size]
|
def remove_all_gap_columns(self):
"""
Remove any columns containing only gaps from alignment components,
text of components is modified IN PLACE.
"""
seqs = []
for c in self.components:
try:
seqs.append(list(c.text)) # depends on [control=['try'], data=[]]
except TypeError:
seqs.append(None) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['c']]
i = 0
text_size = self.text_size
while i < text_size:
all_gap = True
for seq in seqs:
if seq is None:
continue # depends on [control=['if'], data=[]]
if seq[i] != '-':
all_gap = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['seq']]
if all_gap:
for seq in seqs:
if seq is None:
continue # depends on [control=['if'], data=[]]
del seq[i] # depends on [control=['for'], data=['seq']]
text_size -= 1 # depends on [control=['if'], data=[]]
else:
i += 1 # depends on [control=['while'], data=['i', 'text_size']]
for i in range(len(self.components)):
if seqs[i] is None:
continue # depends on [control=['if'], data=[]]
self.components[i].text = ''.join(seqs[i]) # depends on [control=['for'], data=['i']]
self.text_size = text_size
|
def copyNodeList(self):
"""Do a recursive copy of the node list. Use
xmlDocCopyNodeList() if possible to ensure string interning. """
ret = libxml2mod.xmlCopyNodeList(self._o)
        if ret is None:
            raise treeError('xmlCopyNodeList() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
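# Hedged usage sketch with the libxml2 Python bindings; the XML literal is
# made up. xmlCopyNodeList() clones a node together with its following
# siblings, so `clone` holds copies of both <a/> and <b/>.
import libxml2

doc = libxml2.parseDoc('<root><a/><b/></root>')
first_child = doc.getRootElement().children  # the <a/> node
clone = first_child.copyNodeList()
print(clone.serialize())  # serializes the first copied node: '<a/>'
doc.freeDoc()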
|
def function[copyNodeList, parameter[self]]:
constant[Do a recursive copy of the node list. Use
xmlDocCopyNodeList() if possible to ensure string interning. ]
variable[ret] assign[=] call[name[libxml2mod].xmlCopyNodeList, parameter[name[self]._o]]
if compare[name[ret] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b1fa70d0>
variable[__tmp] assign[=] call[name[xmlNode], parameter[]]
return[name[__tmp]]
|
keyword[def] identifier[copyNodeList] ( identifier[self] ):
literal[string]
identifier[ret] = identifier[libxml2mod] . identifier[xmlCopyNodeList] ( identifier[self] . identifier[_o] )
keyword[if] identifier[ret] keyword[is] keyword[None] : keyword[raise] identifier[treeError] ( literal[string] )
identifier[__tmp] = identifier[xmlNode] ( identifier[_obj] = identifier[ret] )
keyword[return] identifier[__tmp]
|
def copyNodeList(self):
"""Do a recursive copy of the node list. Use
xmlDocCopyNodeList() if possible to ensure string interning. """
ret = libxml2mod.xmlCopyNodeList(self._o)
if ret is None:
raise treeError('xmlCopyNodeList() failed') # depends on [control=['if'], data=[]]
__tmp = xmlNode(_obj=ret)
return __tmp
|
def _create_sata_controllers(sata_controllers):
'''
Returns a list of vim.vm.device.VirtualDeviceSpec objects representing
SATA controllers
sata_controllers
SATA properties
'''
sata_ctrls = []
keys = range(-15000, -15050, -1)
if sata_controllers:
devs = [sata['adapter'] for sata in sata_controllers]
log.trace('Creating SATA controllers %s', devs)
for sata, key in zip(sata_controllers, keys):
sata_ctrls.append(_apply_sata_controller_config(
sata['adapter'], 'add', key, sata['bus_number']))
return sata_ctrls
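# Hedged usage sketch, meant to run inside this Salt execution module (the
# helper it calls needs pyVmomi). The dicts mirror the schema read above;
# the values are illustrative.
specs = _create_sata_controllers([
    {'adapter': 'SATA controller 0', 'bus_number': 0},
    {'adapter': 'SATA controller 1', 'bus_number': 1},
])
# specs now holds two vim.vm.device.VirtualDeviceSpec objects with device
# keys -15000 and -15001, ready for a config spec's deviceChange list.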
|
def function[_create_sata_controllers, parameter[sata_controllers]]:
constant[
Returns a list of vim.vm.device.VirtualDeviceSpec objects representing
SATA controllers
sata_controllers
SATA properties
]
variable[sata_ctrls] assign[=] list[[]]
variable[keys] assign[=] call[name[range], parameter[<ast.UnaryOp object at 0x7da204567340>, <ast.UnaryOp object at 0x7da204566380>, <ast.UnaryOp object at 0x7da204565fc0>]]
if name[sata_controllers] begin[:]
variable[devs] assign[=] <ast.ListComp object at 0x7da204564d30>
call[name[log].trace, parameter[constant[Creating SATA controllers %s], name[devs]]]
for taget[tuple[[<ast.Name object at 0x7da20e960220>, <ast.Name object at 0x7da20e963e20>]]] in starred[call[name[zip], parameter[name[sata_controllers], name[keys]]]] begin[:]
call[name[sata_ctrls].append, parameter[call[name[_apply_sata_controller_config], parameter[call[name[sata]][constant[adapter]], constant[add], name[key], call[name[sata]][constant[bus_number]]]]]]
return[name[sata_ctrls]]
|
keyword[def] identifier[_create_sata_controllers] ( identifier[sata_controllers] ):
literal[string]
identifier[sata_ctrls] =[]
identifier[keys] = identifier[range] (- literal[int] ,- literal[int] ,- literal[int] )
keyword[if] identifier[sata_controllers] :
identifier[devs] =[ identifier[sata] [ literal[string] ] keyword[for] identifier[sata] keyword[in] identifier[sata_controllers] ]
identifier[log] . identifier[trace] ( literal[string] , identifier[devs] )
keyword[for] identifier[sata] , identifier[key] keyword[in] identifier[zip] ( identifier[sata_controllers] , identifier[keys] ):
identifier[sata_ctrls] . identifier[append] ( identifier[_apply_sata_controller_config] (
identifier[sata] [ literal[string] ], literal[string] , identifier[key] , identifier[sata] [ literal[string] ]))
keyword[return] identifier[sata_ctrls]
|
def _create_sata_controllers(sata_controllers):
"""
Returns a list of vim.vm.device.VirtualDeviceSpec objects representing
SATA controllers
sata_controllers
SATA properties
"""
sata_ctrls = []
keys = range(-15000, -15050, -1)
if sata_controllers:
devs = [sata['adapter'] for sata in sata_controllers]
log.trace('Creating SATA controllers %s', devs)
for (sata, key) in zip(sata_controllers, keys):
sata_ctrls.append(_apply_sata_controller_config(sata['adapter'], 'add', key, sata['bus_number'])) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return sata_ctrls
|
def federated_query(self,
environment_id,
filter=None,
query=None,
natural_language_query=None,
passages=None,
aggregation=None,
count=None,
return_fields=None,
offset=None,
sort=None,
highlight=None,
passages_fields=None,
passages_count=None,
passages_characters=None,
deduplicate=None,
deduplicate_field=None,
collection_ids=None,
similar=None,
similar_document_ids=None,
similar_fields=None,
bias=None,
logging_opt_out=None,
**kwargs):
"""
Long environment queries.
Complex queries might be too long for a standard method query. By using this
method, you can construct longer queries. However, these queries may take longer
to complete than the standard method. For details, see the [Discovery service
documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-query-concepts#query-concepts).
:param str environment_id: The ID of the environment.
:param str filter: A cacheable query that excludes documents that don't mention
the query content. Filter searches are better for metadata-type searches and for
assessing the concepts in the data set.
:param str query: A query search returns all documents in your data set with full
enrichments and full text, but with the most relevant documents listed first. Use
a query search when you want to find the most relevant search results. You cannot
use **natural_language_query** and **query** at the same time.
:param str natural_language_query: A natural language query that returns relevant
documents by utilizing training data and natural language understanding. You
cannot use **natural_language_query** and **query** at the same time.
:param bool passages: A passages query that returns the most relevant passages
from the results.
:param str aggregation: An aggregation search that returns an exact answer by
combining query search with filters. Useful for applications to build lists,
tables, and time series. For a full list of possible aggregations, see the Query
reference.
:param int count: Number of results to return.
:param str return_fields: A comma-separated list of the portion of the document
hierarchy to return.
:param int offset: The number of query results to skip at the beginning. For
example, if the total number of results that are returned is 10 and the offset is
8, it returns the last two results.
:param str sort: A comma-separated list of fields in the document to sort on. You
can optionally specify a sort direction by prefixing the field with `-` for
descending or `+` for ascending. Ascending is the default sort direction if no
prefix is specified. This parameter cannot be used in the same query as the
**bias** parameter.
:param bool highlight: When true, a highlight field is returned for each result
which contains the fields which match the query with `<em></em>` tags around the
matching query terms.
:param str passages_fields: A comma-separated list of fields that passages are
        drawn from. If this parameter is not specified, then all top-level fields are
included.
:param int passages_count: The maximum number of passages to return. The search
returns fewer passages if the requested total is not found. The default is `10`.
The maximum is `100`.
:param int passages_characters: The approximate number of characters that any one
passage will have.
:param bool deduplicate: When `true`, and used with a Watson Discovery News
collection, duplicate results (based on the contents of the **title** field) are
removed. Duplicate comparison is limited to the current query only; **offset** is
not considered. This parameter is currently Beta functionality.
:param str deduplicate_field: When specified, duplicate results based on the field
specified are removed from the returned results. Duplicate comparison is limited
to the current query only, **offset** is not considered. This parameter is
currently Beta functionality.
:param str collection_ids: A comma-separated list of collection IDs to be queried
against. Required when querying multiple collections, invalid when performing a
single collection query.
:param bool similar: When `true`, results are returned based on their similarity
to the document IDs specified in the **similar.document_ids** parameter.
:param str similar_document_ids: A comma-separated list of document IDs to find
similar documents.
**Tip:** Include the **natural_language_query** parameter to expand the scope of
the document similarity search with the natural language query. Other query
parameters, such as **filter** and **query**, are subsequently applied and reduce
the scope.
:param str similar_fields: A comma-separated list of field names that are used as
a basis for comparison to identify similar documents. If not specified, the entire
document is used for comparison.
:param str bias: Field which the returned results will be biased against. The
specified field must be either a **date** or **number** format. When a **date**
type field is specified returned results are biased towards field values closer to
the current date. When a **number** type field is specified, returned results are
biased towards higher field values. This parameter cannot be used in the same
query as the **sort** parameter.
:param bool logging_opt_out: If `true`, queries are not stored in the Discovery
**Logs** endpoint.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if environment_id is None:
raise ValueError('environment_id must be provided')
headers = {'X-Watson-Logging-Opt-Out': logging_opt_out}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('discovery', 'V1', 'federated_query')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'filter': filter,
'query': query,
'natural_language_query': natural_language_query,
'passages': passages,
'aggregation': aggregation,
'count': count,
'return': return_fields,
'offset': offset,
'sort': sort,
'highlight': highlight,
'passages.fields': passages_fields,
'passages.count': passages_count,
'passages.characters': passages_characters,
'deduplicate': deduplicate,
'deduplicate.field': deduplicate_field,
'collection_ids': collection_ids,
'similar': similar,
'similar.document_ids': similar_document_ids,
'similar.fields': similar_fields,
'bias': bias
}
url = '/v1/environments/{0}/query'.format(
*self._encode_path_vars(environment_id))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
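# Hedged usage sketch for the Watson Discovery v1 SDK of this era; the
# credentials and ids are placeholders.
from ibm_watson import DiscoveryV1

discovery = DiscoveryV1(version='2019-04-30', iam_apikey='YOUR_APIKEY')
result = discovery.federated_query(
    environment_id='YOUR_ENVIRONMENT_ID',
    collection_ids='COLLECTION_ID_1,COLLECTION_ID_2',
    natural_language_query='relevant news',
    count=5,
).get_result()
print(result['matching_results'])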
|
def function[federated_query, parameter[self, environment_id, filter, query, natural_language_query, passages, aggregation, count, return_fields, offset, sort, highlight, passages_fields, passages_count, passages_characters, deduplicate, deduplicate_field, collection_ids, similar, similar_document_ids, similar_fields, bias, logging_opt_out]]:
constant[
Long environment queries.
Complex queries might be too long for a standard method query. By using this
method, you can construct longer queries. However, these queries may take longer
to complete than the standard method. For details, see the [Discovery service
documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-query-concepts#query-concepts).
:param str environment_id: The ID of the environment.
:param str filter: A cacheable query that excludes documents that don't mention
the query content. Filter searches are better for metadata-type searches and for
assessing the concepts in the data set.
:param str query: A query search returns all documents in your data set with full
enrichments and full text, but with the most relevant documents listed first. Use
a query search when you want to find the most relevant search results. You cannot
use **natural_language_query** and **query** at the same time.
:param str natural_language_query: A natural language query that returns relevant
documents by utilizing training data and natural language understanding. You
cannot use **natural_language_query** and **query** at the same time.
:param bool passages: A passages query that returns the most relevant passages
from the results.
:param str aggregation: An aggregation search that returns an exact answer by
combining query search with filters. Useful for applications to build lists,
tables, and time series. For a full list of possible aggregations, see the Query
reference.
:param int count: Number of results to return.
:param str return_fields: A comma-separated list of the portion of the document
hierarchy to return.
:param int offset: The number of query results to skip at the beginning. For
example, if the total number of results that are returned is 10 and the offset is
8, it returns the last two results.
:param str sort: A comma-separated list of fields in the document to sort on. You
can optionally specify a sort direction by prefixing the field with `-` for
descending or `+` for ascending. Ascending is the default sort direction if no
prefix is specified. This parameter cannot be used in the same query as the
**bias** parameter.
:param bool highlight: When true, a highlight field is returned for each result
which contains the fields which match the query with `<em></em>` tags around the
matching query terms.
:param str passages_fields: A comma-separated list of fields that passages are
drawn from. If this parameter not specified, then all top-level fields are
included.
:param int passages_count: The maximum number of passages to return. The search
returns fewer passages if the requested total is not found. The default is `10`.
The maximum is `100`.
:param int passages_characters: The approximate number of characters that any one
passage will have.
:param bool deduplicate: When `true`, and used with a Watson Discovery News
collection, duplicate results (based on the contents of the **title** field) are
removed. Duplicate comparison is limited to the current query only; **offset** is
not considered. This parameter is currently Beta functionality.
:param str deduplicate_field: When specified, duplicate results based on the field
specified are removed from the returned results. Duplicate comparison is limited
to the current query only, **offset** is not considered. This parameter is
currently Beta functionality.
:param str collection_ids: A comma-separated list of collection IDs to be queried
against. Required when querying multiple collections, invalid when performing a
single collection query.
:param bool similar: When `true`, results are returned based on their similarity
to the document IDs specified in the **similar.document_ids** parameter.
:param str similar_document_ids: A comma-separated list of document IDs to find
similar documents.
**Tip:** Include the **natural_language_query** parameter to expand the scope of
the document similarity search with the natural language query. Other query
parameters, such as **filter** and **query**, are subsequently applied and reduce
the scope.
:param str similar_fields: A comma-separated list of field names that are used as
a basis for comparison to identify similar documents. If not specified, the entire
document is used for comparison.
:param str bias: Field which the returned results will be biased against. The
specified field must be either a **date** or **number** format. When a **date**
type field is specified returned results are biased towards field values closer to
the current date. When a **number** type field is specified, returned results are
biased towards higher field values. This parameter cannot be used in the same
query as the **sort** parameter.
:param bool logging_opt_out: If `true`, queries are not stored in the Discovery
**Logs** endpoint.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
]
if compare[name[environment_id] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b2345f60>
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b2344220>], [<ast.Name object at 0x7da1b2347a90>]]
if compare[constant[headers] in name[kwargs]] begin[:]
call[name[headers].update, parameter[call[name[kwargs].get, parameter[constant[headers]]]]]
variable[sdk_headers] assign[=] call[name[get_sdk_headers], parameter[constant[discovery], constant[V1], constant[federated_query]]]
call[name[headers].update, parameter[name[sdk_headers]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18bcc8670>], [<ast.Attribute object at 0x7da18bccb250>]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da18bcca5c0>, <ast.Constant object at 0x7da18bcc8fd0>, <ast.Constant object at 0x7da18bcc8d00>, <ast.Constant object at 0x7da18bcc8520>, <ast.Constant object at 0x7da18bccb0a0>, <ast.Constant object at 0x7da18bcc8250>, <ast.Constant object at 0x7da18bcca080>, <ast.Constant object at 0x7da18bccb6d0>, <ast.Constant object at 0x7da18bccb760>, <ast.Constant object at 0x7da18bcc9690>, <ast.Constant object at 0x7da18bccbb50>, <ast.Constant object at 0x7da18bccafe0>, <ast.Constant object at 0x7da18bcc8490>, <ast.Constant object at 0x7da18bcc8f10>, <ast.Constant object at 0x7da18bcc91e0>, <ast.Constant object at 0x7da18bcc8700>, <ast.Constant object at 0x7da18bcca590>, <ast.Constant object at 0x7da18bcca320>, <ast.Constant object at 0x7da18bcc8ee0>, <ast.Constant object at 0x7da18bccbc10>], [<ast.Name object at 0x7da18bcca920>, <ast.Name object at 0x7da18bcc9d20>, <ast.Name object at 0x7da18bcc8970>, <ast.Name object at 0x7da18bccab00>, <ast.Name object at 0x7da18bcc8a00>, <ast.Name object at 0x7da18bcc8280>, <ast.Name object at 0x7da18bcc9480>, <ast.Name object at 0x7da18bcc9f00>, <ast.Name object at 0x7da18bcc9f60>, <ast.Name object at 0x7da18bccb160>, <ast.Name object at 0x7da18bccb520>, <ast.Name object at 0x7da18bccb280>, <ast.Name object at 0x7da18bcca740>, <ast.Name object at 0x7da18bcc84c0>, <ast.Name object at 0x7da18bccb9d0>, <ast.Name object at 0x7da18bcca710>, <ast.Name object at 0x7da18bcc83a0>, <ast.Name object at 0x7da18bcc9420>, <ast.Name object at 0x7da18bccbbe0>, <ast.Name object at 0x7da18bccbac0>]]
variable[url] assign[=] call[constant[/v1/environments/{0}/query].format, parameter[<ast.Starred object at 0x7da18bcc87f0>]]
variable[response] assign[=] call[name[self].request, parameter[]]
return[name[response]]
|
keyword[def] identifier[federated_query] ( identifier[self] ,
identifier[environment_id] ,
identifier[filter] = keyword[None] ,
identifier[query] = keyword[None] ,
identifier[natural_language_query] = keyword[None] ,
identifier[passages] = keyword[None] ,
identifier[aggregation] = keyword[None] ,
identifier[count] = keyword[None] ,
identifier[return_fields] = keyword[None] ,
identifier[offset] = keyword[None] ,
identifier[sort] = keyword[None] ,
identifier[highlight] = keyword[None] ,
identifier[passages_fields] = keyword[None] ,
identifier[passages_count] = keyword[None] ,
identifier[passages_characters] = keyword[None] ,
identifier[deduplicate] = keyword[None] ,
identifier[deduplicate_field] = keyword[None] ,
identifier[collection_ids] = keyword[None] ,
identifier[similar] = keyword[None] ,
identifier[similar_document_ids] = keyword[None] ,
identifier[similar_fields] = keyword[None] ,
identifier[bias] = keyword[None] ,
identifier[logging_opt_out] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
keyword[if] identifier[environment_id] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[headers] ={ literal[string] : identifier[logging_opt_out] }
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[headers] . identifier[update] ( identifier[kwargs] . identifier[get] ( literal[string] ))
identifier[sdk_headers] = identifier[get_sdk_headers] ( literal[string] , literal[string] , literal[string] )
identifier[headers] . identifier[update] ( identifier[sdk_headers] )
identifier[params] ={ literal[string] : identifier[self] . identifier[version] }
identifier[data] ={
literal[string] : identifier[filter] ,
literal[string] : identifier[query] ,
literal[string] : identifier[natural_language_query] ,
literal[string] : identifier[passages] ,
literal[string] : identifier[aggregation] ,
literal[string] : identifier[count] ,
literal[string] : identifier[return_fields] ,
literal[string] : identifier[offset] ,
literal[string] : identifier[sort] ,
literal[string] : identifier[highlight] ,
literal[string] : identifier[passages_fields] ,
literal[string] : identifier[passages_count] ,
literal[string] : identifier[passages_characters] ,
literal[string] : identifier[deduplicate] ,
literal[string] : identifier[deduplicate_field] ,
literal[string] : identifier[collection_ids] ,
literal[string] : identifier[similar] ,
literal[string] : identifier[similar_document_ids] ,
literal[string] : identifier[similar_fields] ,
literal[string] : identifier[bias]
}
identifier[url] = literal[string] . identifier[format] (
* identifier[self] . identifier[_encode_path_vars] ( identifier[environment_id] ))
identifier[response] = identifier[self] . identifier[request] (
identifier[method] = literal[string] ,
identifier[url] = identifier[url] ,
identifier[headers] = identifier[headers] ,
identifier[params] = identifier[params] ,
identifier[json] = identifier[data] ,
identifier[accept_json] = keyword[True] )
keyword[return] identifier[response]
|
def federated_query(self, environment_id, filter=None, query=None, natural_language_query=None, passages=None, aggregation=None, count=None, return_fields=None, offset=None, sort=None, highlight=None, passages_fields=None, passages_count=None, passages_characters=None, deduplicate=None, deduplicate_field=None, collection_ids=None, similar=None, similar_document_ids=None, similar_fields=None, bias=None, logging_opt_out=None, **kwargs):
"""
Long environment queries.
Complex queries might be too long for a standard method query. By using this
method, you can construct longer queries. However, these queries may take longer
to complete than the standard method. For details, see the [Discovery service
documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-query-concepts#query-concepts).
:param str environment_id: The ID of the environment.
:param str filter: A cacheable query that excludes documents that don't mention
the query content. Filter searches are better for metadata-type searches and for
assessing the concepts in the data set.
:param str query: A query search returns all documents in your data set with full
enrichments and full text, but with the most relevant documents listed first. Use
a query search when you want to find the most relevant search results. You cannot
use **natural_language_query** and **query** at the same time.
:param str natural_language_query: A natural language query that returns relevant
documents by utilizing training data and natural language understanding. You
cannot use **natural_language_query** and **query** at the same time.
:param bool passages: A passages query that returns the most relevant passages
from the results.
:param str aggregation: An aggregation search that returns an exact answer by
combining query search with filters. Useful for applications to build lists,
tables, and time series. For a full list of possible aggregations, see the Query
reference.
:param int count: Number of results to return.
:param str return_fields: A comma-separated list of the portion of the document
hierarchy to return.
:param int offset: The number of query results to skip at the beginning. For
example, if the total number of results that are returned is 10 and the offset is
8, it returns the last two results.
:param str sort: A comma-separated list of fields in the document to sort on. You
can optionally specify a sort direction by prefixing the field with `-` for
descending or `+` for ascending. Ascending is the default sort direction if no
prefix is specified. This parameter cannot be used in the same query as the
**bias** parameter.
:param bool highlight: When true, a highlight field is returned for each result
which contains the fields which match the query with `<em></em>` tags around the
matching query terms.
:param str passages_fields: A comma-separated list of fields that passages are
    drawn from. If this parameter is not specified, then all top-level fields are
included.
:param int passages_count: The maximum number of passages to return. The search
returns fewer passages if the requested total is not found. The default is `10`.
The maximum is `100`.
:param int passages_characters: The approximate number of characters that any one
passage will have.
:param bool deduplicate: When `true`, and used with a Watson Discovery News
collection, duplicate results (based on the contents of the **title** field) are
removed. Duplicate comparison is limited to the current query only; **offset** is
not considered. This parameter is currently Beta functionality.
:param str deduplicate_field: When specified, duplicate results based on the field
specified are removed from the returned results. Duplicate comparison is limited
to the current query only, **offset** is not considered. This parameter is
currently Beta functionality.
:param str collection_ids: A comma-separated list of collection IDs to be queried
against. Required when querying multiple collections, invalid when performing a
single collection query.
:param bool similar: When `true`, results are returned based on their similarity
to the document IDs specified in the **similar.document_ids** parameter.
:param str similar_document_ids: A comma-separated list of document IDs to find
similar documents.
**Tip:** Include the **natural_language_query** parameter to expand the scope of
the document similarity search with the natural language query. Other query
parameters, such as **filter** and **query**, are subsequently applied and reduce
the scope.
:param str similar_fields: A comma-separated list of field names that are used as
a basis for comparison to identify similar documents. If not specified, the entire
document is used for comparison.
:param str bias: Field which the returned results will be biased against. The
specified field must be either a **date** or **number** format. When a **date**
type field is specified returned results are biased towards field values closer to
the current date. When a **number** type field is specified, returned results are
biased towards higher field values. This parameter cannot be used in the same
query as the **sort** parameter.
:param bool logging_opt_out: If `true`, queries are not stored in the Discovery
**Logs** endpoint.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if environment_id is None:
raise ValueError('environment_id must be provided') # depends on [control=['if'], data=[]]
headers = {'X-Watson-Logging-Opt-Out': logging_opt_out}
if 'headers' in kwargs:
headers.update(kwargs.get('headers')) # depends on [control=['if'], data=['kwargs']]
sdk_headers = get_sdk_headers('discovery', 'V1', 'federated_query')
headers.update(sdk_headers)
params = {'version': self.version}
data = {'filter': filter, 'query': query, 'natural_language_query': natural_language_query, 'passages': passages, 'aggregation': aggregation, 'count': count, 'return': return_fields, 'offset': offset, 'sort': sort, 'highlight': highlight, 'passages.fields': passages_fields, 'passages.count': passages_count, 'passages.characters': passages_characters, 'deduplicate': deduplicate, 'deduplicate.field': deduplicate_field, 'collection_ids': collection_ids, 'similar': similar, 'similar.document_ids': similar_document_ids, 'similar.fields': similar_fields, 'bias': bias}
url = '/v1/environments/{0}/query'.format(*self._encode_path_vars(environment_id))
response = self.request(method='POST', url=url, headers=headers, params=params, json=data, accept_json=True)
return response
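
A hedged usage sketch for the long-form query above. The client class and credentials are assumptions (this method ships in IBM's watson-developer-cloud Python SDK as part of DiscoveryV1); the IDs and API key are placeholders.

from watson_developer_cloud import DiscoveryV1  # assumed host package

discovery = DiscoveryV1(
    version='2018-12-03',
    iam_apikey='YOUR_API_KEY',  # placeholder credential
    url='https://gateway.watsonplatform.net/discovery/api')
response = discovery.federated_query(
    environment_id='YOUR_ENV_ID',        # placeholder
    collection_ids='COL_ID_1,COL_ID_2',  # required when querying multiple collections
    natural_language_query='renewable energy subsidies',
    count=5,
    passages=True)
print(response)  # a DetailedResponse with result, headers and HTTP status code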
|
def _interpolate(self, colors, n=100):
""" Returns intermediary colors for given list of colors.
"""
gradient = []
for i in _range(n):
l = len(colors) - 1
x = int(1.0 * i / n * l)
x = min(x + 0, l)
y = min(x + 1, l)
base = 1.0 * n / l * x
d = (i - base) / (1.0 * n / l)
r = colors[x].r * (1 - d) + colors[y].r * d
g = colors[x].g * (1 - d) + colors[y].g * d
b = colors[x].b * (1 - d) + colors[y].b * d
a = colors[x].a * (1 - d) + colors[y].a * d
gradient.append(color(r, g, b, a, mode="rgb"))
gradient.append(colors[-1])
return gradient
|
def function[_interpolate, parameter[self, colors, n]]:
constant[ Returns intermediary colors for given list of colors.
]
variable[gradient] assign[=] list[[]]
for taget[name[i]] in starred[call[name[_range], parameter[name[n]]]] begin[:]
variable[l] assign[=] binary_operation[call[name[len], parameter[name[colors]]] - constant[1]]
variable[x] assign[=] call[name[int], parameter[binary_operation[binary_operation[binary_operation[constant[1.0] * name[i]] / name[n]] * name[l]]]]
variable[x] assign[=] call[name[min], parameter[binary_operation[name[x] + constant[0]], name[l]]]
variable[y] assign[=] call[name[min], parameter[binary_operation[name[x] + constant[1]], name[l]]]
variable[base] assign[=] binary_operation[binary_operation[binary_operation[constant[1.0] * name[n]] / name[l]] * name[x]]
variable[d] assign[=] binary_operation[binary_operation[name[i] - name[base]] / binary_operation[binary_operation[constant[1.0] * name[n]] / name[l]]]
variable[r] assign[=] binary_operation[binary_operation[call[name[colors]][name[x]].r * binary_operation[constant[1] - name[d]]] + binary_operation[call[name[colors]][name[y]].r * name[d]]]
variable[g] assign[=] binary_operation[binary_operation[call[name[colors]][name[x]].g * binary_operation[constant[1] - name[d]]] + binary_operation[call[name[colors]][name[y]].g * name[d]]]
variable[b] assign[=] binary_operation[binary_operation[call[name[colors]][name[x]].b * binary_operation[constant[1] - name[d]]] + binary_operation[call[name[colors]][name[y]].b * name[d]]]
variable[a] assign[=] binary_operation[binary_operation[call[name[colors]][name[x]].a * binary_operation[constant[1] - name[d]]] + binary_operation[call[name[colors]][name[y]].a * name[d]]]
call[name[gradient].append, parameter[call[name[color], parameter[name[r], name[g], name[b], name[a]]]]]
call[name[gradient].append, parameter[call[name[colors]][<ast.UnaryOp object at 0x7da1b00f7490>]]]
return[name[gradient]]
|
keyword[def] identifier[_interpolate] ( identifier[self] , identifier[colors] , identifier[n] = literal[int] ):
literal[string]
identifier[gradient] =[]
keyword[for] identifier[i] keyword[in] identifier[_range] ( identifier[n] ):
identifier[l] = identifier[len] ( identifier[colors] )- literal[int]
identifier[x] = identifier[int] ( literal[int] * identifier[i] / identifier[n] * identifier[l] )
identifier[x] = identifier[min] ( identifier[x] + literal[int] , identifier[l] )
identifier[y] = identifier[min] ( identifier[x] + literal[int] , identifier[l] )
identifier[base] = literal[int] * identifier[n] / identifier[l] * identifier[x]
identifier[d] =( identifier[i] - identifier[base] )/( literal[int] * identifier[n] / identifier[l] )
identifier[r] = identifier[colors] [ identifier[x] ]. identifier[r] *( literal[int] - identifier[d] )+ identifier[colors] [ identifier[y] ]. identifier[r] * identifier[d]
identifier[g] = identifier[colors] [ identifier[x] ]. identifier[g] *( literal[int] - identifier[d] )+ identifier[colors] [ identifier[y] ]. identifier[g] * identifier[d]
identifier[b] = identifier[colors] [ identifier[x] ]. identifier[b] *( literal[int] - identifier[d] )+ identifier[colors] [ identifier[y] ]. identifier[b] * identifier[d]
identifier[a] = identifier[colors] [ identifier[x] ]. identifier[a] *( literal[int] - identifier[d] )+ identifier[colors] [ identifier[y] ]. identifier[a] * identifier[d]
identifier[gradient] . identifier[append] ( identifier[color] ( identifier[r] , identifier[g] , identifier[b] , identifier[a] , identifier[mode] = literal[string] ))
identifier[gradient] . identifier[append] ( identifier[colors] [- literal[int] ])
keyword[return] identifier[gradient]
|
def _interpolate(self, colors, n=100):
""" Returns intermediary colors for given list of colors.
"""
gradient = []
for i in _range(n):
l = len(colors) - 1
x = int(1.0 * i / n * l)
x = min(x + 0, l)
y = min(x + 1, l)
base = 1.0 * n / l * x
d = (i - base) / (1.0 * n / l)
r = colors[x].r * (1 - d) + colors[y].r * d
g = colors[x].g * (1 - d) + colors[y].g * d
b = colors[x].b * (1 - d) + colors[y].b * d
a = colors[x].a * (1 - d) + colors[y].a * d
gradient.append(color(r, g, b, a, mode='rgb')) # depends on [control=['for'], data=['i']]
gradient.append(colors[-1])
return gradient
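
To make the index arithmetic in _interpolate concrete, here is a self-contained sketch that reproduces the anchor indices x/y and the blend weight d for three colors and n=6, using plain RGB tuples instead of the module's internal color objects:

colors = [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)]  # red, green, blue
n = 6
l = len(colors) - 1
for i in range(n):
    x = min(int(1.0 * i / n * l), l)  # left anchor color
    y = min(x + 1, l)                 # right anchor color
    base = 1.0 * n / l * x            # first step that uses anchor x
    d = (i - base) / (1.0 * n / l)    # blend weight toward y
    rgb = tuple(colors[x][c] * (1 - d) + colors[y][c] * d for c in range(3))
    print(i, x, y, round(d, 2), rgb)  # steps 0-2 blend red->green, 3-5 green->blue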
|
def highlight_project_bid(session, bid_id):
"""
Highlight a bid on a project
"""
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
bid_data = {
'action': 'highlight'
}
    # PUT /api/projects/0.1/bids/{bid_id}/?action=highlight
endpoint = 'bids/{}'.format(bid_id)
response = make_put_request(session, endpoint, headers=headers,
params_data=bid_data)
json_data = response.json()
if response.status_code == 200:
return json_data['status']
else:
json_data = response.json()
raise BidNotHighlightedException(message=json_data['message'],
error_code=json_data['error_code'],
request_id=json_data['request_id'])
|
def function[highlight_project_bid, parameter[session, bid_id]]:
constant[
Highlight a bid on a project
]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da18dc07d30>], [<ast.Constant object at 0x7da18dc07280>]]
variable[bid_data] assign[=] dictionary[[<ast.Constant object at 0x7da18f00f190>], [<ast.Constant object at 0x7da18f00ec20>]]
variable[endpoint] assign[=] call[constant[bids/{}].format, parameter[name[bid_id]]]
variable[response] assign[=] call[name[make_put_request], parameter[name[session], name[endpoint]]]
variable[json_data] assign[=] call[name[response].json, parameter[]]
if compare[name[response].status_code equal[==] constant[200]] begin[:]
return[call[name[json_data]][constant[status]]]
|
keyword[def] identifier[highlight_project_bid] ( identifier[session] , identifier[bid_id] ):
literal[string]
identifier[headers] ={
literal[string] : literal[string]
}
identifier[bid_data] ={
literal[string] : literal[string]
}
identifier[endpoint] = literal[string] . identifier[format] ( identifier[bid_id] )
identifier[response] = identifier[make_put_request] ( identifier[session] , identifier[endpoint] , identifier[headers] = identifier[headers] ,
identifier[params_data] = identifier[bid_data] )
identifier[json_data] = identifier[response] . identifier[json] ()
keyword[if] identifier[response] . identifier[status_code] == literal[int] :
keyword[return] identifier[json_data] [ literal[string] ]
keyword[else] :
identifier[json_data] = identifier[response] . identifier[json] ()
keyword[raise] identifier[BidNotHighlightedException] ( identifier[message] = identifier[json_data] [ literal[string] ],
identifier[error_code] = identifier[json_data] [ literal[string] ],
identifier[request_id] = identifier[json_data] [ literal[string] ])
|
def highlight_project_bid(session, bid_id):
"""
Highlight a bid on a project
"""
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
bid_data = {'action': 'highlight'}
    # PUT /api/projects/0.1/bids/{bid_id}/?action=highlight
endpoint = 'bids/{}'.format(bid_id)
response = make_put_request(session, endpoint, headers=headers, params_data=bid_data)
json_data = response.json()
if response.status_code == 200:
return json_data['status'] # depends on [control=['if'], data=[]]
else:
json_data = response.json()
raise BidNotHighlightedException(message=json_data['message'], error_code=json_data['error_code'], request_id=json_data['request_id'])
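
A hedged usage sketch; Session is assumed to be the freelancersdk session object this helper normally receives, and the token and bid ID are placeholders:

from freelancersdk.session import Session  # assumed host package

session = Session(oauth_token='YOUR_OAUTH_TOKEN')  # placeholder credential
try:
    status = highlight_project_bid(session, bid_id=12345)  # placeholder bid ID
    print('bid highlighted, status:', status)
except BidNotHighlightedException as exc:
    print('highlight failed:', exc)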
|
def plot_heatmap(self, kind="final", min_freq=0.01, threshold=2, name=True, max_len=50, aspect=1, **kwargs):
"""Plot clustered heatmap of predicted motif activity.
Parameters
----------
kind : str, optional
Which data type to use for plotting. Default is 'final', which will plot the
        result of the rank aggregation. Other options are 'freq' for the motif frequencies,
or any of the individual activities such as 'rf.score'.
min_freq : float, optional
Minimum frequency of motif occurrence.
threshold : float, optional
Minimum activity (absolute) of the rank aggregation result.
name : bool, optional
Use factor names instead of motif names for plotting.
max_len : int, optional
Truncate the list of factors to this maximum length.
aspect : int, optional
Aspect ratio for tweaking the plot.
kwargs : other keyword arguments
All other keyword arguments are passed to sns.clustermap
Returns
-------
cg : ClusterGrid
A seaborn ClusterGrid instance.
"""
filt = np.any(np.abs(self.result) >= threshold, 1) & np.any(np.abs(self.freq.T) >= min_freq, 1)
idx = self.result[filt].index
cmap = "RdBu_r"
if kind == "final":
data = self.result
elif kind == "freq":
data = self.freq.T
cmap = "Reds"
elif kind in self.activity:
            data = self.activity[kind]
if kind in ["hypergeom.count", "mwu.score"]:
cmap = "Reds"
else:
raise ValueError("Unknown dtype")
#print(data.head())
#plt.figure(
m = data.loc[idx]
if name:
m["factors"] = [join_max(self.motifs[n].factors, max_len, ",", suffix=",(...)") for n in m.index]
m = m.set_index("factors")
h,w = m.shape
cg = sns.clustermap(m, cmap=cmap, col_cluster=False,
figsize=(2 + w * 0.5 * aspect, 0.5 * h), linewidths=1,
**kwargs)
cg.ax_col_dendrogram.set_visible(False)
    plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
return cg
|
def function[plot_heatmap, parameter[self, kind, min_freq, threshold, name, max_len, aspect]]:
constant[Plot clustered heatmap of predicted motif activity.
Parameters
----------
kind : str, optional
Which data type to use for plotting. Default is 'final', which will plot the
        result of the rank aggregation. Other options are 'freq' for the motif frequencies,
or any of the individual activities such as 'rf.score'.
min_freq : float, optional
Minimum frequency of motif occurrence.
threshold : float, optional
Minimum activity (absolute) of the rank aggregation result.
name : bool, optional
Use factor names instead of motif names for plotting.
max_len : int, optional
Truncate the list of factors to this maximum length.
aspect : int, optional
Aspect ratio for tweaking the plot.
kwargs : other keyword arguments
All other keyword arguments are passed to sns.clustermap
Returns
-------
cg : ClusterGrid
A seaborn ClusterGrid instance.
]
variable[filt] assign[=] binary_operation[call[name[np].any, parameter[compare[call[name[np].abs, parameter[name[self].result]] greater_or_equal[>=] name[threshold]], constant[1]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[np].any, parameter[compare[call[name[np].abs, parameter[name[self].freq.T]] greater_or_equal[>=] name[min_freq]], constant[1]]]]
variable[idx] assign[=] call[name[self].result][name[filt]].index
variable[cmap] assign[=] constant[RdBu_r]
if compare[name[kind] equal[==] constant[final]] begin[:]
variable[data] assign[=] name[self].result
variable[m] assign[=] call[name[data].loc][name[idx]]
if name[name] begin[:]
call[name[m]][constant[factors]] assign[=] <ast.ListComp object at 0x7da2041d8400>
variable[m] assign[=] call[name[m].set_index, parameter[constant[factors]]]
<ast.Tuple object at 0x7da2041d8910> assign[=] name[m].shape
variable[cg] assign[=] call[name[sns].clustermap, parameter[name[m]]]
call[name[cg].ax_col_dendrogram.set_visible, parameter[constant[False]]]
call[name[plt].setp, parameter[call[name[cg].ax_heatmap.yaxis.get_majorticklabels, parameter[]]]]
return[name[cg]]
|
keyword[def] identifier[plot_heatmap] ( identifier[self] , identifier[kind] = literal[string] , identifier[min_freq] = literal[int] , identifier[threshold] = literal[int] , identifier[name] = keyword[True] , identifier[max_len] = literal[int] , identifier[aspect] = literal[int] ,** identifier[kwargs] ):
literal[string]
identifier[filt] = identifier[np] . identifier[any] ( identifier[np] . identifier[abs] ( identifier[self] . identifier[result] )>= identifier[threshold] , literal[int] )& identifier[np] . identifier[any] ( identifier[np] . identifier[abs] ( identifier[self] . identifier[freq] . identifier[T] )>= identifier[min_freq] , literal[int] )
identifier[idx] = identifier[self] . identifier[result] [ identifier[filt] ]. identifier[index]
identifier[cmap] = literal[string]
keyword[if] identifier[kind] == literal[string] :
identifier[data] = identifier[self] . identifier[result]
keyword[elif] identifier[kind] == literal[string] :
identifier[data] = identifier[self] . identifier[freq] . identifier[T]
identifier[cmap] = literal[string]
keyword[elif] identifier[kind] keyword[in] identifier[self] . identifier[activity] :
        identifier[data] = identifier[self] . identifier[activity] [ identifier[kind] ]
keyword[if] identifier[kind] keyword[in] [ literal[string] , literal[string] ]:
identifier[cmap] = literal[string]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[m] = identifier[data] . identifier[loc] [ identifier[idx] ]
keyword[if] identifier[name] :
identifier[m] [ literal[string] ]=[ identifier[join_max] ( identifier[self] . identifier[motifs] [ identifier[n] ]. identifier[factors] , identifier[max_len] , literal[string] , identifier[suffix] = literal[string] ) keyword[for] identifier[n] keyword[in] identifier[m] . identifier[index] ]
identifier[m] = identifier[m] . identifier[set_index] ( literal[string] )
identifier[h] , identifier[w] = identifier[m] . identifier[shape]
identifier[cg] = identifier[sns] . identifier[clustermap] ( identifier[m] , identifier[cmap] = identifier[cmap] , identifier[col_cluster] = keyword[False] ,
identifier[figsize] =( literal[int] + identifier[w] * literal[int] * identifier[aspect] , literal[int] * identifier[h] ), identifier[linewidths] = literal[int] ,
** identifier[kwargs] )
identifier[cg] . identifier[ax_col_dendrogram] . identifier[set_visible] ( keyword[False] )
identifier[plt] . identifier[setp] ( identifier[cg] . identifier[ax_heatmap] . identifier[yaxis] . identifier[get_majorticklabels] (), identifier[rotation] = literal[int] );
keyword[return] identifier[cg]
|
def plot_heatmap(self, kind='final', min_freq=0.01, threshold=2, name=True, max_len=50, aspect=1, **kwargs):
"""Plot clustered heatmap of predicted motif activity.
Parameters
----------
kind : str, optional
Which data type to use for plotting. Default is 'final', which will plot the
        result of the rank aggregation. Other options are 'freq' for the motif frequencies,
or any of the individual activities such as 'rf.score'.
min_freq : float, optional
Minimum frequency of motif occurrence.
threshold : float, optional
Minimum activity (absolute) of the rank aggregation result.
name : bool, optional
Use factor names instead of motif names for plotting.
max_len : int, optional
Truncate the list of factors to this maximum length.
aspect : int, optional
Aspect ratio for tweaking the plot.
kwargs : other keyword arguments
All other keyword arguments are passed to sns.clustermap
Returns
-------
cg : ClusterGrid
A seaborn ClusterGrid instance.
"""
filt = np.any(np.abs(self.result) >= threshold, 1) & np.any(np.abs(self.freq.T) >= min_freq, 1)
idx = self.result[filt].index
cmap = 'RdBu_r'
if kind == 'final':
data = self.result # depends on [control=['if'], data=[]]
elif kind == 'freq':
data = self.freq.T
cmap = 'Reds' # depends on [control=['if'], data=[]]
elif kind in self.activity:
        data = self.activity[kind]
if kind in ['hypergeom.count', 'mwu.score']:
cmap = 'Reds' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['kind']]
else:
        raise ValueError('Unknown kind')
#print(data.head())
#plt.figure(
m = data.loc[idx]
if name:
m['factors'] = [join_max(self.motifs[n].factors, max_len, ',', suffix=',(...)') for n in m.index]
m = m.set_index('factors') # depends on [control=['if'], data=[]]
(h, w) = m.shape
cg = sns.clustermap(m, cmap=cmap, col_cluster=False, figsize=(2 + w * 0.5 * aspect, 0.5 * h), linewidths=1, **kwargs)
cg.ax_col_dendrogram.set_visible(False)
plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
return cg
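
A hedged usage sketch, assuming res is an instance of the fitted results class this method belongs to, and that seaborn and matplotlib are installed; ClusterGrid exposes savefig, so the figure can be written straight to disk:

import matplotlib.pyplot as plt

# res: the results object that owns plot_heatmap (assumption; loaded elsewhere)
cg = res.plot_heatmap(kind='final', min_freq=0.01, threshold=1.5, name=True, aspect=2)
cg.savefig('motif_activity_heatmap.png', dpi=150)
plt.close('all')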
|
def set_direction(self, new_direction):
"""
Set image direction
Arguments
---------
new_direction : numpy.ndarray or tuple or list
updated direction for the image.
should have one value for each dimension
Returns
-------
None
"""
if isinstance(new_direction, (tuple,list)):
new_direction = np.asarray(new_direction)
if not isinstance(new_direction, np.ndarray):
raise ValueError('arg must be np.ndarray or tuple or list')
if len(new_direction) != self.dimension:
        raise ValueError('must give a direction value for each dimension (%i)' % self.dimension)
libfn = utils.get_lib_fn('setDirection%s'%self._libsuffix)
libfn(self.pointer, new_direction)
|
def function[set_direction, parameter[self, new_direction]]:
constant[
Set image direction
Arguments
---------
new_direction : numpy.ndarray or tuple or list
updated direction for the image.
should have one value for each dimension
Returns
-------
None
]
if call[name[isinstance], parameter[name[new_direction], tuple[[<ast.Name object at 0x7da1b155fd60>, <ast.Name object at 0x7da1b155fb80>]]]] begin[:]
variable[new_direction] assign[=] call[name[np].asarray, parameter[name[new_direction]]]
if <ast.UnaryOp object at 0x7da1b155fe50> begin[:]
<ast.Raise object at 0x7da1b155d030>
if compare[call[name[len], parameter[name[new_direction]]] not_equal[!=] name[self].dimension] begin[:]
<ast.Raise object at 0x7da1b1632ec0>
variable[libfn] assign[=] call[name[utils].get_lib_fn, parameter[binary_operation[constant[setDirection%s] <ast.Mod object at 0x7da2590d6920> name[self]._libsuffix]]]
call[name[libfn], parameter[name[self].pointer, name[new_direction]]]
|
keyword[def] identifier[set_direction] ( identifier[self] , identifier[new_direction] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[new_direction] ,( identifier[tuple] , identifier[list] )):
identifier[new_direction] = identifier[np] . identifier[asarray] ( identifier[new_direction] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[new_direction] , identifier[np] . identifier[ndarray] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[len] ( identifier[new_direction] )!= identifier[self] . identifier[dimension] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[self] . identifier[dimension] )
identifier[libfn] = identifier[utils] . identifier[get_lib_fn] ( literal[string] % identifier[self] . identifier[_libsuffix] )
identifier[libfn] ( identifier[self] . identifier[pointer] , identifier[new_direction] )
|
def set_direction(self, new_direction):
"""
Set image direction
Arguments
---------
new_direction : numpy.ndarray or tuple or list
updated direction for the image.
should have one value for each dimension
Returns
-------
None
"""
if isinstance(new_direction, (tuple, list)):
new_direction = np.asarray(new_direction) # depends on [control=['if'], data=[]]
if not isinstance(new_direction, np.ndarray):
raise ValueError('arg must be np.ndarray or tuple or list') # depends on [control=['if'], data=[]]
if len(new_direction) != self.dimension:
        raise ValueError('must give a direction value for each dimension (%i)' % self.dimension) # depends on [control=['if'], data=[]]
libfn = utils.get_lib_fn('setDirection%s' % self._libsuffix)
libfn(self.pointer, new_direction)
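
A hedged usage sketch, assuming the surrounding class is ANTsPy's ANTsImage. The direction of an N-dimensional image is an N x N matrix, so len(new_direction) equals the dimension and the check above passes:

import numpy as np
import ants  # assumed host package (ANTsPy)

img = ants.image_read(ants.get_ants_data('r16'))  # bundled 2D sample image
img.set_direction(np.array([[0.0, 1.0],
                            [1.0, 0.0]]))  # swap the two axes
print(img.direction)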
|
def get_datacenter(conn):
'''
Return the datacenter from the config provider datacenter ID
'''
datacenter_id = get_datacenter_id()
for item in conn.list_datacenters()['items']:
if item['id'] == datacenter_id:
return item
raise SaltCloudNotFound(
'The specified datacenter \'{0}\' could not be found.'.format(
datacenter_id
)
)
|
def function[get_datacenter, parameter[conn]]:
constant[
Return the datacenter from the config provider datacenter ID
]
variable[datacenter_id] assign[=] call[name[get_datacenter_id], parameter[]]
for taget[name[item]] in starred[call[call[name[conn].list_datacenters, parameter[]]][constant[items]]] begin[:]
if compare[call[name[item]][constant[id]] equal[==] name[datacenter_id]] begin[:]
return[name[item]]
<ast.Raise object at 0x7da1b1f37430>
|
keyword[def] identifier[get_datacenter] ( identifier[conn] ):
literal[string]
identifier[datacenter_id] = identifier[get_datacenter_id] ()
keyword[for] identifier[item] keyword[in] identifier[conn] . identifier[list_datacenters] ()[ literal[string] ]:
keyword[if] identifier[item] [ literal[string] ]== identifier[datacenter_id] :
keyword[return] identifier[item]
keyword[raise] identifier[SaltCloudNotFound] (
literal[string] . identifier[format] (
identifier[datacenter_id]
)
)
|
def get_datacenter(conn):
"""
Return the datacenter from the config provider datacenter ID
"""
datacenter_id = get_datacenter_id()
for item in conn.list_datacenters()['items']:
if item['id'] == datacenter_id:
return item # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
raise SaltCloudNotFound("The specified datacenter '{0}' could not be found.".format(datacenter_id))
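
A hedged driving sketch; get_conn() is a hypothetical stand-in for whatever returns the provider client in this Salt Cloud driver, and the datacenter ID is resolved from provider config by get_datacenter_id():

conn = get_conn()  # hypothetical helper returning the ProfitBricks client
try:
    datacenter = get_datacenter(conn)
    print('found datacenter:', datacenter['id'])
except SaltCloudNotFound as exc:
    print(exc)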
|
def conv_to_json(obj, fields=None):
"""
return cdx as json dictionary string
if ``fields`` is ``None``, output will include all fields
in order stored, otherwise only specified fields will be
included
:param fields: list of field names to output
"""
if fields is None:
return json_encode(OrderedDict(((x, obj[x]) for x in obj if not x.startswith('_')))) + '\n'
result = json_encode(OrderedDict([(x, obj[x]) for x in fields if x in obj])) + '\n'
return result
|
def function[conv_to_json, parameter[obj, fields]]:
constant[
return cdx as json dictionary string
if ``fields`` is ``None``, output will include all fields
in order stored, otherwise only specified fields will be
included
:param fields: list of field names to output
]
if compare[name[fields] is constant[None]] begin[:]
return[binary_operation[call[name[json_encode], parameter[call[name[OrderedDict], parameter[<ast.GeneratorExp object at 0x7da1b1e982e0>]]]] + constant[
]]]
variable[result] assign[=] binary_operation[call[name[json_encode], parameter[call[name[OrderedDict], parameter[<ast.ListComp object at 0x7da2041da3e0>]]]] + constant[
]]
return[name[result]]
|
keyword[def] identifier[conv_to_json] ( identifier[obj] , identifier[fields] = keyword[None] ):
literal[string]
keyword[if] identifier[fields] keyword[is] keyword[None] :
keyword[return] identifier[json_encode] ( identifier[OrderedDict] ((( identifier[x] , identifier[obj] [ identifier[x] ]) keyword[for] identifier[x] keyword[in] identifier[obj] keyword[if] keyword[not] identifier[x] . identifier[startswith] ( literal[string] ))))+ literal[string]
identifier[result] = identifier[json_encode] ( identifier[OrderedDict] ([( identifier[x] , identifier[obj] [ identifier[x] ]) keyword[for] identifier[x] keyword[in] identifier[fields] keyword[if] identifier[x] keyword[in] identifier[obj] ]))+ literal[string]
keyword[return] identifier[result]
|
def conv_to_json(obj, fields=None):
"""
return cdx as json dictionary string
if ``fields`` is ``None``, output will include all fields
in order stored, otherwise only specified fields will be
included
:param fields: list of field names to output
"""
if fields is None:
return json_encode(OrderedDict(((x, obj[x]) for x in obj if not x.startswith('_')))) + '\n' # depends on [control=['if'], data=[]]
result = json_encode(OrderedDict([(x, obj[x]) for x in fields if x in obj])) + '\n'
return result
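
A quick demonstration of the field filtering; json_encode is the module's own JSON wrapper, so this sketch substitutes the standard library to show the shape of the output:

import json
from collections import OrderedDict

obj = OrderedDict([('urlkey', 'com,example)/'),
                   ('timestamp', '20200101000000'),
                   ('_internal', 'hidden')])  # leading underscore: excluded by default
# fields=None: every public field, in stored order
print(json.dumps(OrderedDict((x, obj[x]) for x in obj if not x.startswith('_'))))
# explicit field list: only fields present in the record survive
print(json.dumps(OrderedDict((x, obj[x]) for x in ['timestamp', 'missing'] if x in obj)))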
|
def _special_method_cache(method, cache_wrapper):
"""
Because Python treats special methods differently, it's not
possible to use instance attributes to implement the cached
methods.
Instead, install the wrapper method under a different name
and return a simple proxy to that wrapper.
https://github.com/jaraco/jaraco.functools/issues/5
"""
name = method.__name__
special_names = '__getattr__', '__getitem__'
if name not in special_names:
return
wrapper_name = '__cached' + name
def proxy(self, *args, **kwargs):
if wrapper_name not in vars(self):
bound = types.MethodType(method, self)
cache = cache_wrapper(bound)
setattr(self, wrapper_name, cache)
else:
cache = getattr(self, wrapper_name)
return cache(*args, **kwargs)
return proxy
|
def function[_special_method_cache, parameter[method, cache_wrapper]]:
constant[
Because Python treats special methods differently, it's not
possible to use instance attributes to implement the cached
methods.
Instead, install the wrapper method under a different name
and return a simple proxy to that wrapper.
https://github.com/jaraco/jaraco.functools/issues/5
]
variable[name] assign[=] name[method].__name__
variable[special_names] assign[=] tuple[[<ast.Constant object at 0x7da18f09e1d0>, <ast.Constant object at 0x7da18f09cd90>]]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[special_names]] begin[:]
return[None]
variable[wrapper_name] assign[=] binary_operation[constant[__cached] + name[name]]
def function[proxy, parameter[self]]:
if compare[name[wrapper_name] <ast.NotIn object at 0x7da2590d7190> call[name[vars], parameter[name[self]]]] begin[:]
variable[bound] assign[=] call[name[types].MethodType, parameter[name[method], name[self]]]
variable[cache] assign[=] call[name[cache_wrapper], parameter[name[bound]]]
call[name[setattr], parameter[name[self], name[wrapper_name], name[cache]]]
return[call[name[cache], parameter[<ast.Starred object at 0x7da18f723be0>]]]
return[name[proxy]]
|
keyword[def] identifier[_special_method_cache] ( identifier[method] , identifier[cache_wrapper] ):
literal[string]
identifier[name] = identifier[method] . identifier[__name__]
identifier[special_names] = literal[string] , literal[string]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[special_names] :
keyword[return]
identifier[wrapper_name] = literal[string] + identifier[name]
keyword[def] identifier[proxy] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[wrapper_name] keyword[not] keyword[in] identifier[vars] ( identifier[self] ):
identifier[bound] = identifier[types] . identifier[MethodType] ( identifier[method] , identifier[self] )
identifier[cache] = identifier[cache_wrapper] ( identifier[bound] )
identifier[setattr] ( identifier[self] , identifier[wrapper_name] , identifier[cache] )
keyword[else] :
identifier[cache] = identifier[getattr] ( identifier[self] , identifier[wrapper_name] )
keyword[return] identifier[cache] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[proxy]
|
def _special_method_cache(method, cache_wrapper):
"""
Because Python treats special methods differently, it's not
possible to use instance attributes to implement the cached
methods.
Instead, install the wrapper method under a different name
and return a simple proxy to that wrapper.
https://github.com/jaraco/jaraco.functools/issues/5
"""
name = method.__name__
special_names = ('__getattr__', '__getitem__')
if name not in special_names:
return # depends on [control=['if'], data=[]]
wrapper_name = '__cached' + name
def proxy(self, *args, **kwargs):
if wrapper_name not in vars(self):
bound = types.MethodType(method, self)
cache = cache_wrapper(bound)
setattr(self, wrapper_name, cache) # depends on [control=['if'], data=['wrapper_name']]
else:
cache = getattr(self, wrapper_name)
return cache(*args, **kwargs)
return proxy
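
A self-contained sketch of why the proxy is needed: special methods are looked up on the type, not the instance, so the per-instance cache has to be installed lazily under another name. functools.lru_cache() stands in for cache_wrapper here; a module-level `import types` is assumed, matching what the function body resolves:

import functools
import types  # resolved from module scope by the proxy above

class Squares:
    def __getitem__(self, n):
        print('computing', n)
        return n * n

Squares.__getitem__ = _special_method_cache(Squares.__getitem__,
                                            functools.lru_cache())
s = Squares()
print(s[4])  # prints 'computing 4', then 16
print(s[4])  # served from the per-instance cache: just 16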
|
def create_from_request_pdu(pdu):
""" Create instance from request PDU.
This method requires some clarification regarding the unpacking of
the status that are being passed to the callbacks.
A coil status can be 0 or 1. The request PDU contains at least 1 byte,
representing the status for 1 to 8 coils.
Assume a request with starting address 100, quantity set to 3 and the
    value byte is 6. 0b110 is the binary representation of decimal 6. The
    Least Significant Bit (LSB) is the status of the coil at the starting
    address. So the status of coil 100 is 0, the status of coil 101 is 1 and
    the status of coil 102 is 1 too.
coil address 102 101 100
1 1 0
Again, assume starting address 100 and byte value is 6. But now
quantity is 4. So the value byte is addressing 4 coils. The binary
representation of 6 is now 0b0110. LSB again is 0, meaning status of
coil 100 is 0. Status of 101 and 102 is 1, like in the previous
    example. Status of coil 103 is 0.
    coil address    103 102 101 100
                      0   1   1   0
In short: the binary representation of the byte value is in reverse
mapped to the coil addresses. In table below you can see some more
examples.
# quantity value binary representation | 102 101 100
== ======== ===== ===================== | === === ===
01 1 0 0b0 - - 0
02 1 1 0b1 - - 1
03 2 0 0b00 - 0 0
04 2 1 0b01 - 0 1
05 2 2 0b10 - 1 0
06 2 3 0b11 - 1 1
07 3 0 0b000 0 0 0
08 3 1 0b001 0 0 1
09 3 2 0b010 0 1 0
10 3 3 0b011 0 1 1
11 3 4 0b100 1 0 0
12 3 5 0b101 1 0 1
13 3 6 0b110 1 1 0
14 3 7 0b111 1 1 1
:param pdu: A request PDU.
"""
_, starting_address, quantity, byte_count = \
struct.unpack('>BHHB', pdu[:6])
fmt = '>' + (conf.SINGLE_BIT_VALUE_FORMAT_CHARACTER * byte_count)
values = struct.unpack(fmt, pdu[6:])
res = list()
for i, value in enumerate(values):
padding = 8 if (quantity - (8 * i)) // 8 > 0 else quantity % 8
fmt = '{{0:0{padding}b}}'.format(padding=padding)
# Create binary representation of integer, convert it to a list
# and reverse the list.
res = res + [int(i) for i in fmt.format(value)][::-1]
instance = WriteMultipleCoils()
instance.starting_address = starting_address
instance.quantity = quantity
instance.values = res
return instance
|
def function[create_from_request_pdu, parameter[pdu]]:
constant[ Create instance from request PDU.
This method requires some clarification regarding the unpacking of
the status that are being passed to the callbacks.
A coil status can be 0 or 1. The request PDU contains at least 1 byte,
representing the status for 1 to 8 coils.
Assume a request with starting address 100, quantity set to 3 and the
    value byte is 6. 0b110 is the binary representation of decimal 6. The
    Least Significant Bit (LSB) is the status of the coil at the starting
    address. So the status of coil 100 is 0, the status of coil 101 is 1 and
    the status of coil 102 is 1 too.
coil address 102 101 100
1 1 0
Again, assume starting address 100 and byte value is 6. But now
quantity is 4. So the value byte is addressing 4 coils. The binary
representation of 6 is now 0b0110. LSB again is 0, meaning status of
coil 100 is 0. Status of 101 and 102 is 1, like in the previous
    example. Status of coil 103 is 0.
    coil address    103 102 101 100
                      0   1   1   0
In short: the binary representation of the byte value is in reverse
mapped to the coil addresses. In table below you can see some more
examples.
# quantity value binary representation | 102 101 100
== ======== ===== ===================== | === === ===
01 1 0 0b0 - - 0
02 1 1 0b1 - - 1
03 2 0 0b00 - 0 0
04 2 1 0b01 - 0 1
05 2 2 0b10 - 1 0
06 2 3 0b11 - 1 1
07 3 0 0b000 0 0 0
08 3 1 0b001 0 0 1
09 3 2 0b010 0 1 0
10 3 3 0b011 0 1 1
11 3 4 0b100 1 0 0
12 3 5 0b101 1 0 1
13 3 6 0b110 1 1 0
14 3 7 0b111 1 1 1
:param pdu: A request PDU.
]
<ast.Tuple object at 0x7da18fe90250> assign[=] call[name[struct].unpack, parameter[constant[>BHHB], call[name[pdu]][<ast.Slice object at 0x7da18fe91030>]]]
variable[fmt] assign[=] binary_operation[constant[>] + binary_operation[name[conf].SINGLE_BIT_VALUE_FORMAT_CHARACTER * name[byte_count]]]
variable[values] assign[=] call[name[struct].unpack, parameter[name[fmt], call[name[pdu]][<ast.Slice object at 0x7da18fe907f0>]]]
variable[res] assign[=] call[name[list], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18fe92fe0>, <ast.Name object at 0x7da18fe92b00>]]] in starred[call[name[enumerate], parameter[name[values]]]] begin[:]
variable[padding] assign[=] <ast.IfExp object at 0x7da18fe91960>
variable[fmt] assign[=] call[constant[{{0:0{padding}b}}].format, parameter[]]
variable[res] assign[=] binary_operation[name[res] + call[<ast.ListComp object at 0x7da18fe90d30>][<ast.Slice object at 0x7da18fe92bf0>]]
variable[instance] assign[=] call[name[WriteMultipleCoils], parameter[]]
name[instance].starting_address assign[=] name[starting_address]
name[instance].quantity assign[=] name[quantity]
name[instance].values assign[=] name[res]
return[name[instance]]
|
keyword[def] identifier[create_from_request_pdu] ( identifier[pdu] ):
literal[string]
identifier[_] , identifier[starting_address] , identifier[quantity] , identifier[byte_count] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[pdu] [: literal[int] ])
identifier[fmt] = literal[string] +( identifier[conf] . identifier[SINGLE_BIT_VALUE_FORMAT_CHARACTER] * identifier[byte_count] )
identifier[values] = identifier[struct] . identifier[unpack] ( identifier[fmt] , identifier[pdu] [ literal[int] :])
identifier[res] = identifier[list] ()
keyword[for] identifier[i] , identifier[value] keyword[in] identifier[enumerate] ( identifier[values] ):
identifier[padding] = literal[int] keyword[if] ( identifier[quantity] -( literal[int] * identifier[i] ))// literal[int] > literal[int] keyword[else] identifier[quantity] % literal[int]
identifier[fmt] = literal[string] . identifier[format] ( identifier[padding] = identifier[padding] )
identifier[res] = identifier[res] +[ identifier[int] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[fmt] . identifier[format] ( identifier[value] )][::- literal[int] ]
identifier[instance] = identifier[WriteMultipleCoils] ()
identifier[instance] . identifier[starting_address] = identifier[starting_address]
identifier[instance] . identifier[quantity] = identifier[quantity]
identifier[instance] . identifier[values] = identifier[res]
keyword[return] identifier[instance]
|
def create_from_request_pdu(pdu):
""" Create instance from request PDU.
This method requires some clarification regarding the unpacking of
the status that are being passed to the callbacks.
A coil status can be 0 or 1. The request PDU contains at least 1 byte,
representing the status for 1 to 8 coils.
Assume a request with starting address 100, quantity set to 3 and the
    value byte is 6. 0b110 is the binary representation of decimal 6. The
    Least Significant Bit (LSB) is the status of the coil at the starting
    address. So the status of coil 100 is 0, the status of coil 101 is 1 and
    the status of coil 102 is 1 too.
coil address 102 101 100
1 1 0
Again, assume starting address 100 and byte value is 6. But now
quantity is 4. So the value byte is addressing 4 coils. The binary
representation of 6 is now 0b0110. LSB again is 0, meaning status of
coil 100 is 0. Status of 101 and 102 is 1, like in the previous
    example. Status of coil 103 is 0.
    coil address    103 102 101 100
                      0   1   1   0
In short: the binary representation of the byte value is in reverse
mapped to the coil addresses. In table below you can see some more
examples.
# quantity value binary representation | 102 101 100
== ======== ===== ===================== | === === ===
01 1 0 0b0 - - 0
02 1 1 0b1 - - 1
03 2 0 0b00 - 0 0
04 2 1 0b01 - 0 1
05 2 2 0b10 - 1 0
06 2 3 0b11 - 1 1
07 3 0 0b000 0 0 0
08 3 1 0b001 0 0 1
09 3 2 0b010 0 1 0
10 3 3 0b011 0 1 1
11 3 4 0b100 1 0 0
12 3 5 0b101 1 0 1
13 3 6 0b110 1 1 0
14 3 7 0b111 1 1 1
:param pdu: A request PDU.
"""
(_, starting_address, quantity, byte_count) = struct.unpack('>BHHB', pdu[:6])
fmt = '>' + conf.SINGLE_BIT_VALUE_FORMAT_CHARACTER * byte_count
values = struct.unpack(fmt, pdu[6:])
res = list()
for (i, value) in enumerate(values):
padding = 8 if (quantity - 8 * i) // 8 > 0 else quantity % 8
fmt = '{{0:0{padding}b}}'.format(padding=padding)
# Create binary representation of integer, convert it to a list
# and reverse the list.
res = res + [int(i) for i in fmt.format(value)][::-1] # depends on [control=['for'], data=[]]
instance = WriteMultipleCoils()
instance.starting_address = starting_address
instance.quantity = quantity
instance.values = res
return instance
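
To tie the tables in the docstring to real bytes, a hedged round-trip sketch, assuming conf.SINGLE_BIT_VALUE_FORMAT_CHARACTER is 'B' (one unsigned byte per value byte), which matches the byte_count arithmetic above. The request encodes function code 15, starting address 100, quantity 3, byte count 1 and value byte 6:

import struct

pdu = struct.pack('>BHHBB', 15, 100, 3, 1, 6)  # 0b110 packed into one value byte
req = create_from_request_pdu(pdu)
print(req.starting_address, req.quantity, req.values)  # 100 3 [0, 1, 1]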
|
def parse_yaml(self, y):
'''Parse a YAML specification of a target port into this object.'''
super(TargetPort, self).parse_yaml(y)
self.port_name = y['portName']
return self
|
def function[parse_yaml, parameter[self, y]]:
constant[Parse a YAML specification of a target port into this object.]
call[call[name[super], parameter[name[TargetPort], name[self]]].parse_yaml, parameter[name[y]]]
name[self].port_name assign[=] call[name[y]][constant[portName]]
return[name[self]]
|
keyword[def] identifier[parse_yaml] ( identifier[self] , identifier[y] ):
literal[string]
identifier[super] ( identifier[TargetPort] , identifier[self] ). identifier[parse_yaml] ( identifier[y] )
identifier[self] . identifier[port_name] = identifier[y] [ literal[string] ]
keyword[return] identifier[self]
|
def parse_yaml(self, y):
"""Parse a YAML specification of a target port into this object."""
super(TargetPort, self).parse_yaml(y)
self.port_name = y['portName']
return self
|
def check_bounds(args, lowerLimit, upperLimit):
"""
checks whether the parameter vector has left its bound, if so, adds a big number
"""
penalty = 0
bound_hit = False
for i in range(0, len(args)):
if args[i] < lowerLimit[i] or args[i] > upperLimit[i]:
penalty = 10**15
bound_hit = True
return penalty, bound_hit
|
def function[check_bounds, parameter[args, lowerLimit, upperLimit]]:
constant[
checks whether the parameter vector has left its bound, if so, adds a big number
]
variable[penalty] assign[=] constant[0]
variable[bound_hit] assign[=] constant[False]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[args]]]]]] begin[:]
if <ast.BoolOp object at 0x7da18bcc8dc0> begin[:]
variable[penalty] assign[=] binary_operation[constant[10] ** constant[15]]
variable[bound_hit] assign[=] constant[True]
return[tuple[[<ast.Name object at 0x7da18dc9afb0>, <ast.Name object at 0x7da18dc98730>]]]
|
keyword[def] identifier[check_bounds] ( identifier[args] , identifier[lowerLimit] , identifier[upperLimit] ):
literal[string]
identifier[penalty] = literal[int]
identifier[bound_hit] = keyword[False]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[args] )):
keyword[if] identifier[args] [ identifier[i] ]< identifier[lowerLimit] [ identifier[i] ] keyword[or] identifier[args] [ identifier[i] ]> identifier[upperLimit] [ identifier[i] ]:
identifier[penalty] = literal[int] ** literal[int]
identifier[bound_hit] = keyword[True]
keyword[return] identifier[penalty] , identifier[bound_hit]
|
def check_bounds(args, lowerLimit, upperLimit):
"""
checks whether the parameter vector has left its bound, if so, adds a big number
"""
penalty = 0
bound_hit = False
for i in range(0, len(args)):
if args[i] < lowerLimit[i] or args[i] > upperLimit[i]:
penalty = 10 ** 15
bound_hit = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return (penalty, bound_hit)
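
A two-line demonstration of the penalty behaviour; the second parameter sits above its upper bound, so the big-number penalty is returned:

penalty, hit = check_bounds([0.5, 1.7], lowerLimit=[0.0, 0.0], upperLimit=[1.0, 1.0])
print(penalty, hit)  # 1000000000000000 True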
|
def applyRule( self ):
"""
Applies the rule from the builder system to this line edit.
"""
widget = self.queryBuilderWidget()
if ( not widget ):
return
rule = widget.findRule(self.uiTermDDL.currentText())
self.setCurrentRule(rule)
|
def function[applyRule, parameter[self]]:
constant[
Applies the rule from the builder system to this line edit.
]
variable[widget] assign[=] call[name[self].queryBuilderWidget, parameter[]]
if <ast.UnaryOp object at 0x7da1b2469690> begin[:]
return[None]
variable[rule] assign[=] call[name[widget].findRule, parameter[call[name[self].uiTermDDL.currentText, parameter[]]]]
call[name[self].setCurrentRule, parameter[name[rule]]]
|
keyword[def] identifier[applyRule] ( identifier[self] ):
literal[string]
identifier[widget] = identifier[self] . identifier[queryBuilderWidget] ()
keyword[if] ( keyword[not] identifier[widget] ):
keyword[return]
identifier[rule] = identifier[widget] . identifier[findRule] ( identifier[self] . identifier[uiTermDDL] . identifier[currentText] ())
identifier[self] . identifier[setCurrentRule] ( identifier[rule] )
|
def applyRule(self):
"""
Applies the rule from the builder system to this line edit.
"""
widget = self.queryBuilderWidget()
if not widget:
return # depends on [control=['if'], data=[]]
rule = widget.findRule(self.uiTermDDL.currentText())
self.setCurrentRule(rule)
|
def worker(work_unit, max_sample=1000):
'''Expects a coordinate WorkUnit for DragNet and runs the following
steps:
1. scans all dossiers at the *folder* level and assembles feature
vectors for each folder -- see `make_feature`
2. trains a multinomial naive Bayes classifier that treats each
*folder* as a classifier target.
3. sample the corpus by scanning up to `max_sample` and applying
the classifier to each item to get an approx "size" of the Folder
4. Bootstrap by treating those classifier predictions as truth
data and extract the learned features that are predictive as new
query strings.
5. Put the data in kvlayer for webservice end point to return to
polling client -- see dossier.models.routes
'''
if 'config' not in work_unit.spec:
raise coordinate.exceptions.ProgrammerError(
'could not run dragnet without global config')
web_conf = Config()
unitconf = work_unit.spec['config']
with yakonfig.defaulted_config([coordinate, kvlayer, dblogger, web_conf],
config=unitconf):
labels = []
D = list()
label2fid = dict()
rejects = set()
keepers = set()
# 1. make a classifier target for each *folder*, ignoring
# subfolder structure
FT = Folders(web_conf.kvlclient)
for idx, fid in enumerate(FT.folders()):
label2fid[idx] = fid
for sid in FT.subfolders(fid):
for cid, subtopic_id in FT.items(fid, sid):
fc = web_conf.store.get(cid)
if fc:
# NB: first call to make_feature
feat, _rejects, _keepers = make_feature(fc)
                    else:
                        # no feature collection found: fall back to empty
                        # features so `feat` is bound for the append below
                        feat = {}
                        _rejects = {}
                        _keepers = {}
D.append(feat)
labels.append(idx)
rejects.update(_rejects)
keepers.update(_keepers)
logger.info('fid=%r, observation: %r', fid, cid)
# 2. Convert the StringCounters into an sklearn format and
# train MultinomialNB
logger.info('transforming...')
v = DictVectorizer(sparse=False)
X = v.fit_transform(D)
logger.info('transform fit done.')
labels = np.array(labels)
        # Fit the sklearn multinomial naive Bayes classifier
clf = MultinomialNB()
clf.fit(X, labels)
logger.info('fit MultinomialNB')
# 3. Scan the corpus up to max_sample putting the items into
# each target to get an approx "size" of the Folder
counts = Counter()
for cid, fc in islice(web_conf.store.scan(), max_sample):
# build the same feature vector as the training process
feat, _rejects, _keepers = make_feature(fc)
X = v.transform([feat])
# predict which folder it belongs in
target = clf.predict(X[0])[0]
# count the effective size of that folder in this sample
counts[label2fid[target]] += 1
logger.info('counts done')
## 4. Bootstrap by treating those classifier predictions as
## truth data and extract the learned features that are
## predictive as new query strings.
clusters = []
for idx in sorted(set(labels)):
logger.debug('considering cluster: %d', idx)
try:
all_features = v.inverse_transform(clf.feature_log_prob_[idx])[0]
            except Exception:
                logger.warning('beyond edge on cluster %d', idx)
continue
words = Counter(all_features)
ordered = sorted(words.items(),
key=operator.itemgetter(1), reverse=True)
filtered = []
for it in ordered:
if is_bad_token(it[0]): continue
if is_username(it[0]):
logger.debug('%r is_username', it[0])
#else:
# continue
filtered.append(it)
if len(filtered) > 100: # hard cutoff
break
# normalize cluster size exponentially
biggest = exp(filtered[0][1])
# rescale all by biggest
filtered = [(key, int(round(counts[label2fid[idx]] * exp(w) / biggest))) for key, w in filtered]
# describe what we just figured out
logger.info('%s --> %r', label2fid[idx], ['%s: %d' % it for it in filtered[:10]])
            # build the JSON-serializable format for the
# DragNet UI embedded inside SortingDesk
cluster = []
cluster.append({'caption': label2fid[idx],
'weight': counts[label2fid[idx]],
'folder_id': None,
})
cluster += [{'caption': caption, 'weight': weight, 'folder_id': label2fid[idx]} for caption, weight in filtered if weight > 0]
clusters.append(cluster)
# 5. Put the data in kvlayer for webservice end point to
# return to polling client
web_conf.kvlclient.setup_namespace({'dragnet': (str,)})
web_conf.kvlclient.put('dragnet', (('dragnet',), json.dumps({'clusters': clusters})))
return dict(counts)
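
Steps 2 and 3 of the pipeline in isolation, with made-up folders and feature counts, showing the DictVectorizer -> MultinomialNB -> Counter flow without the kvlayer plumbing:

import numpy as np
from collections import Counter
from sklearn.feature_extraction import DictVectorizer
from sklearn.naive_bayes import MultinomialNB

D = [{'rescue': 3, 'dog': 2}, {'gpu': 4, 'cuda': 1}, {'leash': 2, 'dog': 1}]
labels = np.array([0, 1, 0])           # folder index per observation
label2fid = {0: 'pets', 1: 'hardware'}

v = DictVectorizer(sparse=False)
X = v.fit_transform(D)
clf = MultinomialNB().fit(X, labels)

counts = Counter()
for feat in [{'dog': 5}, {'cuda': 2, 'gpu': 2}, {'dog': 1, 'leash': 1}]:
    target = clf.predict(v.transform([feat]))[0]
    counts[label2fid[target]] += 1
print(dict(counts))  # approximate folder sizes over the sampled items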
|
def function[worker, parameter[work_unit, max_sample]]:
constant[Expects a coordinate WorkUnit for DragNet and runs the following
steps:
1. scans all dossiers at the *folder* level and assembles feature
vectors for each folder -- see `make_feature`
2. trains a multinomial naive Bayes classifier that treats each
*folder* as a classifier target.
3. sample the corpus by scanning up to `max_sample` and applying
the classifier to each item to get an approx "size" of the Folder
4. Bootstrap by treating those classifier predictions as truth
data and extract the learned features that are predictive as new
query strings.
5. Put the data in kvlayer for webservice end point to return to
polling client -- see dossier.models.routes
]
if compare[constant[config] <ast.NotIn object at 0x7da2590d7190> name[work_unit].spec] begin[:]
<ast.Raise object at 0x7da1b15c21a0>
variable[web_conf] assign[=] call[name[Config], parameter[]]
variable[unitconf] assign[=] call[name[work_unit].spec][constant[config]]
with call[name[yakonfig].defaulted_config, parameter[list[[<ast.Name object at 0x7da1b15c0e80>, <ast.Name object at 0x7da1b15c1cc0>, <ast.Name object at 0x7da1b15c1870>, <ast.Name object at 0x7da1b15c1210>]]]] begin[:]
variable[labels] assign[=] list[[]]
variable[D] assign[=] call[name[list], parameter[]]
variable[label2fid] assign[=] call[name[dict], parameter[]]
variable[rejects] assign[=] call[name[set], parameter[]]
variable[keepers] assign[=] call[name[set], parameter[]]
variable[FT] assign[=] call[name[Folders], parameter[name[web_conf].kvlclient]]
for taget[tuple[[<ast.Name object at 0x7da1b15c16c0>, <ast.Name object at 0x7da1b15c1360>]]] in starred[call[name[enumerate], parameter[call[name[FT].folders, parameter[]]]]] begin[:]
call[name[label2fid]][name[idx]] assign[=] name[fid]
for taget[name[sid]] in starred[call[name[FT].subfolders, parameter[name[fid]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b15c1660>, <ast.Name object at 0x7da20c6e5480>]]] in starred[call[name[FT].items, parameter[name[fid], name[sid]]]] begin[:]
variable[fc] assign[=] call[name[web_conf].store.get, parameter[name[cid]]]
if name[fc] begin[:]
<ast.Tuple object at 0x7da20c6e60b0> assign[=] call[name[make_feature], parameter[name[fc]]]
call[name[D].append, parameter[name[feat]]]
call[name[labels].append, parameter[name[idx]]]
call[name[rejects].update, parameter[name[_rejects]]]
call[name[keepers].update, parameter[name[_keepers]]]
call[name[logger].info, parameter[constant[fid=%r, observation: %r], name[fid], name[cid]]]
call[name[logger].info, parameter[constant[transforming...]]]
variable[v] assign[=] call[name[DictVectorizer], parameter[]]
variable[X] assign[=] call[name[v].fit_transform, parameter[name[D]]]
call[name[logger].info, parameter[constant[transform fit done.]]]
variable[labels] assign[=] call[name[np].array, parameter[name[labels]]]
variable[clf] assign[=] call[name[MultinomialNB], parameter[]]
call[name[clf].fit, parameter[name[X], name[labels]]]
call[name[logger].info, parameter[constant[fit MultinomialNB]]]
variable[counts] assign[=] call[name[Counter], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20c6e4b80>, <ast.Name object at 0x7da20c6e7f10>]]] in starred[call[name[islice], parameter[call[name[web_conf].store.scan, parameter[]], name[max_sample]]]] begin[:]
<ast.Tuple object at 0x7da20c6e76d0> assign[=] call[name[make_feature], parameter[name[fc]]]
variable[X] assign[=] call[name[v].transform, parameter[list[[<ast.Name object at 0x7da20c6e6890>]]]]
variable[target] assign[=] call[call[name[clf].predict, parameter[call[name[X]][constant[0]]]]][constant[0]]
<ast.AugAssign object at 0x7da20c6e6e60>
call[name[logger].info, parameter[constant[counts done]]]
variable[clusters] assign[=] list[[]]
for taget[name[idx]] in starred[call[name[sorted], parameter[call[name[set], parameter[name[labels]]]]]] begin[:]
call[name[logger].debug, parameter[constant[considering cluster: %d], name[idx]]]
<ast.Try object at 0x7da1b13027a0>
variable[words] assign[=] call[name[Counter], parameter[name[all_features]]]
variable[ordered] assign[=] call[name[sorted], parameter[call[name[words].items, parameter[]]]]
variable[filtered] assign[=] list[[]]
for taget[name[it]] in starred[name[ordered]] begin[:]
if call[name[is_bad_token], parameter[call[name[it]][constant[0]]]] begin[:]
continue
if call[name[is_username], parameter[call[name[it]][constant[0]]]] begin[:]
call[name[logger].debug, parameter[constant[%r is_username], call[name[it]][constant[0]]]]
call[name[filtered].append, parameter[name[it]]]
if compare[call[name[len], parameter[name[filtered]]] greater[>] constant[100]] begin[:]
break
variable[biggest] assign[=] call[name[exp], parameter[call[call[name[filtered]][constant[0]]][constant[1]]]]
variable[filtered] assign[=] <ast.ListComp object at 0x7da1b1301600>
call[name[logger].info, parameter[constant[%s --> %r], call[name[label2fid]][name[idx]], <ast.ListComp object at 0x7da1b1302830>]]
variable[cluster] assign[=] list[[]]
call[name[cluster].append, parameter[dictionary[[<ast.Constant object at 0x7da204961f30>, <ast.Constant object at 0x7da204963700>, <ast.Constant object at 0x7da2049624d0>], [<ast.Subscript object at 0x7da204961ba0>, <ast.Subscript object at 0x7da204960040>, <ast.Constant object at 0x7da204962e30>]]]]
<ast.AugAssign object at 0x7da204961c00>
call[name[clusters].append, parameter[name[cluster]]]
call[name[web_conf].kvlclient.setup_namespace, parameter[dictionary[[<ast.Constant object at 0x7da204962bf0>], [<ast.Tuple object at 0x7da2049615a0>]]]]
call[name[web_conf].kvlclient.put, parameter[constant[dragnet], tuple[[<ast.Tuple object at 0x7da204960dc0>, <ast.Call object at 0x7da204963190>]]]]
return[call[name[dict], parameter[name[counts]]]]
|
keyword[def] identifier[worker] ( identifier[work_unit] , identifier[max_sample] = literal[int] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[work_unit] . identifier[spec] :
keyword[raise] identifier[coordinate] . identifier[exceptions] . identifier[ProgrammerError] (
literal[string] )
identifier[web_conf] = identifier[Config] ()
identifier[unitconf] = identifier[work_unit] . identifier[spec] [ literal[string] ]
keyword[with] identifier[yakonfig] . identifier[defaulted_config] ([ identifier[coordinate] , identifier[kvlayer] , identifier[dblogger] , identifier[web_conf] ],
identifier[config] = identifier[unitconf] ):
identifier[labels] =[]
identifier[D] = identifier[list] ()
identifier[label2fid] = identifier[dict] ()
identifier[rejects] = identifier[set] ()
identifier[keepers] = identifier[set] ()
identifier[FT] = identifier[Folders] ( identifier[web_conf] . identifier[kvlclient] )
keyword[for] identifier[idx] , identifier[fid] keyword[in] identifier[enumerate] ( identifier[FT] . identifier[folders] ()):
identifier[label2fid] [ identifier[idx] ]= identifier[fid]
keyword[for] identifier[sid] keyword[in] identifier[FT] . identifier[subfolders] ( identifier[fid] ):
keyword[for] identifier[cid] , identifier[subtopic_id] keyword[in] identifier[FT] . identifier[items] ( identifier[fid] , identifier[sid] ):
identifier[fc] = identifier[web_conf] . identifier[store] . identifier[get] ( identifier[cid] )
keyword[if] identifier[fc] :
identifier[feat] , identifier[_rejects] , identifier[_keepers] = identifier[make_feature] ( identifier[fc] )
keyword[else] :
identifier[feat] ={}
identifier[_rejects] ={}
identifier[_keepers] ={}
identifier[D] . identifier[append] ( identifier[feat] )
identifier[labels] . identifier[append] ( identifier[idx] )
identifier[rejects] . identifier[update] ( identifier[_rejects] )
identifier[keepers] . identifier[update] ( identifier[_keepers] )
identifier[logger] . identifier[info] ( literal[string] , identifier[fid] , identifier[cid] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[v] = identifier[DictVectorizer] ( identifier[sparse] = keyword[False] )
identifier[X] = identifier[v] . identifier[fit_transform] ( identifier[D] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[labels] = identifier[np] . identifier[array] ( identifier[labels] )
identifier[clf] = identifier[MultinomialNB] ()
identifier[clf] . identifier[fit] ( identifier[X] , identifier[labels] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[counts] = identifier[Counter] ()
keyword[for] identifier[cid] , identifier[fc] keyword[in] identifier[islice] ( identifier[web_conf] . identifier[store] . identifier[scan] (), identifier[max_sample] ):
identifier[feat] , identifier[_rejects] , identifier[_keepers] = identifier[make_feature] ( identifier[fc] )
identifier[X] = identifier[v] . identifier[transform] ([ identifier[feat] ])
identifier[target] = identifier[clf] . identifier[predict] ( identifier[X] [ literal[int] ])[ literal[int] ]
identifier[counts] [ identifier[label2fid] [ identifier[target] ]]+= literal[int]
identifier[logger] . identifier[info] ( literal[string] )
identifier[clusters] =[]
keyword[for] identifier[idx] keyword[in] identifier[sorted] ( identifier[set] ( identifier[labels] )):
identifier[logger] . identifier[debug] ( literal[string] , identifier[idx] )
keyword[try] :
identifier[all_features] = identifier[v] . identifier[inverse_transform] ( identifier[clf] . identifier[feature_log_prob_] [ identifier[idx] ])[ literal[int] ]
keyword[except] :
identifier[logger] . identifier[warn] ( literal[string] , identifier[idx] )
keyword[continue]
identifier[words] = identifier[Counter] ( identifier[all_features] )
identifier[ordered] = identifier[sorted] ( identifier[words] . identifier[items] (),
identifier[key] = identifier[operator] . identifier[itemgetter] ( literal[int] ), identifier[reverse] = keyword[True] )
identifier[filtered] =[]
keyword[for] identifier[it] keyword[in] identifier[ordered] :
keyword[if] identifier[is_bad_token] ( identifier[it] [ literal[int] ]): keyword[continue]
keyword[if] identifier[is_username] ( identifier[it] [ literal[int] ]):
identifier[logger] . identifier[debug] ( literal[string] , identifier[it] [ literal[int] ])
identifier[filtered] . identifier[append] ( identifier[it] )
keyword[if] identifier[len] ( identifier[filtered] )> literal[int] :
keyword[break]
identifier[biggest] = identifier[exp] ( identifier[filtered] [ literal[int] ][ literal[int] ])
identifier[filtered] =[( identifier[key] , identifier[int] ( identifier[round] ( identifier[counts] [ identifier[label2fid] [ identifier[idx] ]]* identifier[exp] ( identifier[w] )/ identifier[biggest] ))) keyword[for] identifier[key] , identifier[w] keyword[in] identifier[filtered] ]
identifier[logger] . identifier[info] ( literal[string] , identifier[label2fid] [ identifier[idx] ],[ literal[string] % identifier[it] keyword[for] identifier[it] keyword[in] identifier[filtered] [: literal[int] ]])
identifier[cluster] =[]
identifier[cluster] . identifier[append] ({ literal[string] : identifier[label2fid] [ identifier[idx] ],
literal[string] : identifier[counts] [ identifier[label2fid] [ identifier[idx] ]],
literal[string] : keyword[None] ,
})
identifier[cluster] +=[{ literal[string] : identifier[caption] , literal[string] : identifier[weight] , literal[string] : identifier[label2fid] [ identifier[idx] ]} keyword[for] identifier[caption] , identifier[weight] keyword[in] identifier[filtered] keyword[if] identifier[weight] > literal[int] ]
identifier[clusters] . identifier[append] ( identifier[cluster] )
identifier[web_conf] . identifier[kvlclient] . identifier[setup_namespace] ({ literal[string] :( identifier[str] ,)})
identifier[web_conf] . identifier[kvlclient] . identifier[put] ( literal[string] ,(( literal[string] ,), identifier[json] . identifier[dumps] ({ literal[string] : identifier[clusters] })))
keyword[return] identifier[dict] ( identifier[counts] )
|
def worker(work_unit, max_sample=1000):
"""Expects a coordinate WorkUnit for DragNet and runs the following
steps:
1. Scan all dossiers at the *folder* level and assemble a feature
vector for each folder -- see `make_feature`.
2. Train a multinomial naive Bayes classifier that treats each
*folder* as a classification target.
3. Sample the corpus by scanning up to `max_sample` items and
apply the classifier to each one to get an approximate "size"
of each folder.
4. Bootstrap by treating those classifier predictions as truth
data and extract the learned features that are predictive as
new query strings.
5. Put the data in kvlayer for the webservice endpoint to return
to the polling client -- see dossier.models.routes
"""
if 'config' not in work_unit.spec:
raise coordinate.exceptions.ProgrammerError('could not run dragnet without global config') # depends on [control=['if'], data=[]]
web_conf = Config()
unitconf = work_unit.spec['config']
with yakonfig.defaulted_config([coordinate, kvlayer, dblogger, web_conf], config=unitconf):
labels = []
D = list()
label2fid = dict()
rejects = set()
keepers = set()
# 1. make a classifier target for each *folder*, ignoring
# subfolder structure
FT = Folders(web_conf.kvlclient)
for (idx, fid) in enumerate(FT.folders()):
label2fid[idx] = fid
for sid in FT.subfolders(fid):
for (cid, subtopic_id) in FT.items(fid, sid):
fc = web_conf.store.get(cid)
if fc:
# NB: first call to make_feature
(feat, _rejects, _keepers) = make_feature(fc) # depends on [control=['if'], data=[]]
else:
# guard so feat is never stale or unbound when the store has no fc
feat = {}
_rejects = {}
_keepers = {}
D.append(feat)
labels.append(idx)
rejects.update(_rejects)
keepers.update(_keepers)
logger.info('fid=%r, observation: %r', fid, cid) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['sid']] # depends on [control=['for'], data=[]]
# 2. Convert the StringCounters into an sklearn format and
# train MultinomialNB
logger.info('transforming...')
v = DictVectorizer(sparse=False)
X = v.fit_transform(D)
logger.info('transform fit done.')
labels = np.array(labels)
# Fit the sklearn Bernoulli Naive Bayes classifer
clf = MultinomialNB()
clf.fit(X, labels)
logger.info('fit MultinomialNB')
# 3. Scan the corpus up to max_sample putting the items into
# each target to get an approx "size" of the Folder
counts = Counter()
for (cid, fc) in islice(web_conf.store.scan(), max_sample):
# build the same feature vector as the training process
(feat, _rejects, _keepers) = make_feature(fc)
X = v.transform([feat])
# predict which folder it belongs in
target = clf.predict(X[0])[0]
# count the effective size of that folder in this sample
counts[label2fid[target]] += 1 # depends on [control=['for'], data=[]]
logger.info('counts done')
## 4. Bootstrap by treating those classifier predictions as
## truth data and extract the learned features that are
## predictive as new query strings.
clusters = []
for idx in sorted(set(labels)):
logger.debug('considering cluster: %d', idx)
try:
all_features = v.inverse_transform(clf.feature_log_prob_[idx])[0] # depends on [control=['try'], data=[]]
except:
logger.warn('beyond edge on cluster %d', idx)
continue # depends on [control=['except'], data=[]]
words = Counter(all_features)
ordered = sorted(words.items(), key=operator.itemgetter(1), reverse=True)
filtered = []
for it in ordered:
if is_bad_token(it[0]):
continue # depends on [control=['if'], data=[]]
if is_username(it[0]):
logger.debug('%r is_username', it[0]) # depends on [control=['if'], data=[]]
#else:
# continue
filtered.append(it)
if len(filtered) > 100: # hard cutoff
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['it']]
# normalize cluster size exponentially
biggest = exp(filtered[0][1])
# rescale all by biggest
filtered = [(key, int(round(counts[label2fid[idx]] * exp(w) / biggest))) for (key, w) in filtered]
# describe what we just figured out
logger.info('%s --> %r', label2fid[idx], ['%s: %d' % it for it in filtered[:10]])
# return build the JSON-serializable format for the
# DragNet UI embedded inside SortingDesk
cluster = []
cluster.append({'caption': label2fid[idx], 'weight': counts[label2fid[idx]], 'folder_id': None})
cluster += [{'caption': caption, 'weight': weight, 'folder_id': label2fid[idx]} for (caption, weight) in filtered if weight > 0]
clusters.append(cluster) # depends on [control=['for'], data=['idx']]
# 5. Put the data in kvlayer for webservice end point to
# return to polling client
web_conf.kvlclient.setup_namespace({'dragnet': (str,)})
web_conf.kvlclient.put('dragnet', (('dragnet',), json.dumps({'clusters': clusters})))
return dict(counts) # depends on [control=['with'], data=[]]
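The classify-then-invert bootstrap above can be exercised in isolation. A minimal sketch, assuming only scikit-learn; the toy folder_docs data and every name in it are illustrative stand-ins, not part of the original pipeline:

# Hedged sketch of steps 2 and 4: train MultinomialNB on folder-labeled
# feature dicts, then invert feature_log_prob_ back into token weights.
from sklearn.feature_extraction import DictVectorizer
from sklearn.naive_bayes import MultinomialNB

folder_docs = {
    'folder-a': [{'python': 3, 'numpy': 2}, {'python': 1, 'scipy': 2}],
    'folder-b': [{'soccer': 4, 'goal': 1}, {'goal': 2, 'match': 3}],
}
D = [feat for docs in folder_docs.values() for feat in docs]
labels = [fid for fid, docs in folder_docs.items() for _ in docs]

v = DictVectorizer(sparse=False)
X = v.fit_transform(D)
clf = MultinomialNB().fit(X, labels)

# Recover {token: log-probability} per class, mirroring the
# v.inverse_transform(clf.feature_log_prob_[idx]) call in the worker.
for row, fid in zip(clf.feature_log_prob_, clf.classes_):
    weights = v.inverse_transform(row.reshape(1, -1))[0]
    top = sorted(weights.items(), key=lambda kv: kv[1], reverse=True)[:3]
    print(fid, top)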
|
def broadcast_variables(*variables):
"""Given any number of variables, return variables with matching dimensions
and broadcast data.
The data on the returned variables will be a view of the data on the
corresponding original arrays, but dimensions will be reordered and
inserted so that both broadcast arrays have the same dimensions. The new
dimensions are sorted in order of appearance in the first variable's
dimensions followed by the second variable's dimensions.
"""
dims_map = _unified_dims(variables)
dims_tuple = tuple(dims_map)
return tuple(var.set_dims(dims_map) if var.dims != dims_tuple else var
for var in variables)
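A usage sketch: this helper mirrors xarray's internal broadcast_variables, so the example borrows xarray's own copy; the internal import path is an assumption that may vary across xarray versions.

# Hedged usage sketch with xarray Variables; dims are unioned in order of
# appearance and the data is broadcast to the common shape.
import numpy as np
from xarray import Variable
from xarray.core.variable import broadcast_variables  # assumed internal path

a = Variable(('x',), np.arange(3))
b = Variable(('y',), np.arange(4))
a2, b2 = broadcast_variables(a, b)
print(a2.dims, a2.shape)  # ('x', 'y') (3, 4)
print(b2.dims, b2.shape)  # ('x', 'y') (3, 4)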
|
def function[broadcast_variables, parameter[]]:
constant[Given any number of variables, return variables with matching dimensions
and broadcast data.
The data on the returned variables will be a view of the data on the
corresponding original arrays, but dimensions will be reordered and
inserted so that both broadcast arrays have the same dimensions. The new
dimensions are sorted in order of appearance in the first variable's
dimensions followed by the second variable's dimensions.
]
variable[dims_map] assign[=] call[name[_unified_dims], parameter[name[variables]]]
variable[dims_tuple] assign[=] call[name[tuple], parameter[name[dims_map]]]
return[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da18eb54dc0>]]]
|
keyword[def] identifier[broadcast_variables] (* identifier[variables] ):
literal[string]
identifier[dims_map] = identifier[_unified_dims] ( identifier[variables] )
identifier[dims_tuple] = identifier[tuple] ( identifier[dims_map] )
keyword[return] identifier[tuple] ( identifier[var] . identifier[set_dims] ( identifier[dims_map] ) keyword[if] identifier[var] . identifier[dims] != identifier[dims_tuple] keyword[else] identifier[var]
keyword[for] identifier[var] keyword[in] identifier[variables] )
|
def broadcast_variables(*variables):
"""Given any number of variables, return variables with matching dimensions
and broadcast data.
The data on the returned variables will be a view of the data on the
corresponding original arrays, but dimensions will be reordered and
inserted so that both broadcast arrays have the same dimensions. The new
dimensions are sorted in order of appearance in the first variable's
dimensions followed by the second variable's dimensions.
"""
dims_map = _unified_dims(variables)
dims_tuple = tuple(dims_map)
return tuple((var.set_dims(dims_map) if var.dims != dims_tuple else var for var in variables))
|
def findProgram(self, builddir, program):
''' Return the builddir-relative path of program, if only a partial
path is specified. Return None and log an error message if the
program is ambiguous or not found.
'''
# if this is an exact match, do no further checking:
if os.path.isfile(os.path.join(builddir, program)):
logging.info('found %s' % program)
return program
exact_matches = []
insensitive_matches = []
approx_matches = []
for path, dirs, files in os.walk(builddir):
if program in files:
exact_matches.append(os.path.relpath(os.path.join(path, program), builddir))
continue
files_lower = [f.lower() for f in files]
if program.lower() in files_lower:
insensitive_matches.append(
os.path.relpath(
os.path.join(path, files[files_lower.index(program.lower())]),
builddir
)
)
continue
# !!! TODO: in the future add approximate string matching (typos,
# etc.), for now we just test stripping any paths off program, and
# looking for substring matches:
pg_basen_lower_noext = os.path.splitext(os.path.basename(program).lower())[0]
for f in files_lower:
if pg_basen_lower_noext in f:
approx_matches.append(
os.path.relpath(
os.path.join(path, files[files_lower.index(f)]),
builddir
)
)
if len(exact_matches) == 1:
logging.info('found %s at %s', program, exact_matches[0])
return exact_matches[0]
elif len(exact_matches) > 1:
logging.error(
'%s matches multiple executables, please use a full path (one of %s)' % (
program,
', or '.join(['"'+os.path.join(m, program)+'"' for m in exact_matches])
)
)
return None
# if we have matches with and without a file extension, prefer the
# no-file extension version, and discard the others (so we avoid
# picking up post-processed files):
reduced_approx_matches = []
for m in approx_matches:
root = os.path.splitext(m)[0]
if (m == root) or (root not in approx_matches):
reduced_approx_matches.append(m)
approx_matches = reduced_approx_matches
for matches in (insensitive_matches, approx_matches):
if len(matches) == 1:
logging.info('found %s at %s' % (
program, matches[0]
))
return matches[0]
elif len(matches) > 1:
logging.error(
'%s is similar to several executables found. Please use an exact name:\n%s' % (
program,
'\n'.join(matches)
)
)
return None
logging.error('could not find program "%s" to debug' % program)
return None
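Because the method body never reads self, it can be exercised standalone. A hedged sketch on a throwaway build tree (passing None for self; the file names are made up):

# Walk through the matching tiers on a tiny temporary tree.
import logging
import os
import tempfile

logging.basicConfig(level=logging.INFO)
builddir = tempfile.mkdtemp()
os.makedirs(os.path.join(builddir, 'source'))
open(os.path.join(builddir, 'source', 'blinky'), 'w').close()

print(findProgram(None, builddir, 'blinky'))  # exact match: source/blinky
print(findProgram(None, builddir, 'BLINKY'))  # case-insensitive match
print(findProgram(None, builddir, 'blin'))    # substring (approximate) match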
|
def function[findProgram, parameter[self, builddir, program]]:
constant[ Return the builddir-relative path of program, if only a partial
path is specified. Return None and log an error message if the
program is ambiguous or not found.
]
if call[name[os].path.isfile, parameter[call[name[os].path.join, parameter[name[builddir], name[program]]]]] begin[:]
call[name[logging].info, parameter[binary_operation[constant[found %s] <ast.Mod object at 0x7da2590d6920> name[program]]]]
return[name[program]]
variable[exact_matches] assign[=] list[[]]
variable[insensitive_matches] assign[=] list[[]]
variable[approx_matches] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b00d8100>, <ast.Name object at 0x7da1b00d9f90>, <ast.Name object at 0x7da1b00db190>]]] in starred[call[name[os].walk, parameter[name[builddir]]]] begin[:]
if compare[name[program] in name[files]] begin[:]
call[name[exact_matches].append, parameter[call[name[os].path.relpath, parameter[call[name[os].path.join, parameter[name[path], name[program]]], name[builddir]]]]]
continue
variable[files_lower] assign[=] <ast.ListComp object at 0x7da1b00d92a0>
if compare[call[name[program].lower, parameter[]] in name[files_lower]] begin[:]
call[name[insensitive_matches].append, parameter[call[name[os].path.relpath, parameter[call[name[os].path.join, parameter[name[path], call[name[files]][call[name[files_lower].index, parameter[call[name[program].lower, parameter[]]]]]]], name[builddir]]]]]
continue
variable[pg_basen_lower_noext] assign[=] call[call[name[os].path.splitext, parameter[call[call[name[os].path.basename, parameter[name[program]]].lower, parameter[]]]]][constant[0]]
for taget[name[f]] in starred[name[files_lower]] begin[:]
if compare[name[pg_basen_lower_noext] in name[f]] begin[:]
call[name[approx_matches].append, parameter[call[name[os].path.relpath, parameter[call[name[os].path.join, parameter[name[path], call[name[files]][call[name[files_lower].index, parameter[name[f]]]]]], name[builddir]]]]]
if compare[call[name[len], parameter[name[exact_matches]]] equal[==] constant[1]] begin[:]
call[name[logging].info, parameter[constant[found %s at %s], name[program], call[name[exact_matches]][constant[0]]]]
return[call[name[exact_matches]][constant[0]]]
variable[reduced_approx_matches] assign[=] list[[]]
for taget[name[m]] in starred[name[approx_matches]] begin[:]
variable[root] assign[=] call[call[name[os].path.splitext, parameter[name[m]]]][constant[0]]
if <ast.BoolOp object at 0x7da1b00dffa0> begin[:]
call[name[reduced_approx_matches].append, parameter[name[m]]]
variable[approx_matches] assign[=] name[reduced_approx_matches]
for taget[name[matches]] in starred[tuple[[<ast.Name object at 0x7da1b00e5fc0>, <ast.Name object at 0x7da1b00e69e0>]]] begin[:]
if compare[call[name[len], parameter[name[matches]]] equal[==] constant[1]] begin[:]
call[name[logging].info, parameter[binary_operation[constant[found %s at %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b00e20e0>, <ast.Subscript object at 0x7da1b00e3b20>]]]]]
return[call[name[matches]][constant[0]]]
call[name[logging].error, parameter[binary_operation[constant[could not find program "%s" to debug] <ast.Mod object at 0x7da2590d6920> name[program]]]]
return[constant[None]]
|
keyword[def] identifier[findProgram] ( identifier[self] , identifier[builddir] , identifier[program] ):
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[builddir] , identifier[program] )):
identifier[logging] . identifier[info] ( literal[string] % identifier[program] )
keyword[return] identifier[program]
identifier[exact_matches] =[]
identifier[insensitive_matches] =[]
identifier[approx_matches] =[]
keyword[for] identifier[path] , identifier[dirs] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[builddir] ):
keyword[if] identifier[program] keyword[in] identifier[files] :
identifier[exact_matches] . identifier[append] ( identifier[os] . identifier[path] . identifier[relpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[program] ), identifier[builddir] ))
keyword[continue]
identifier[files_lower] =[ identifier[f] . identifier[lower] () keyword[for] identifier[f] keyword[in] identifier[files] ]
keyword[if] identifier[program] . identifier[lower] () keyword[in] identifier[files_lower] :
identifier[insensitive_matches] . identifier[append] (
identifier[os] . identifier[path] . identifier[relpath] (
identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[files] [ identifier[files_lower] . identifier[index] ( identifier[program] . identifier[lower] ())]),
identifier[builddir]
)
)
keyword[continue]
identifier[pg_basen_lower_noext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[program] ). identifier[lower] ())[ literal[int] ]
keyword[for] identifier[f] keyword[in] identifier[files_lower] :
keyword[if] identifier[pg_basen_lower_noext] keyword[in] identifier[f] :
identifier[approx_matches] . identifier[append] (
identifier[os] . identifier[path] . identifier[relpath] (
identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[files] [ identifier[files_lower] . identifier[index] ( identifier[f] )]),
identifier[builddir]
)
)
keyword[if] identifier[len] ( identifier[exact_matches] )== literal[int] :
identifier[logging] . identifier[info] ( literal[string] , identifier[program] , identifier[exact_matches] [ literal[int] ])
keyword[return] identifier[exact_matches] [ literal[int] ]
keyword[elif] identifier[len] ( identifier[exact_matches] )> literal[int] :
identifier[logging] . identifier[error] (
literal[string] %(
identifier[program] ,
literal[string] . identifier[join] ([ literal[string] + identifier[os] . identifier[path] . identifier[join] ( identifier[m] , identifier[program] )+ literal[string] keyword[for] identifier[m] keyword[in] identifier[exact_matches] ])
)
)
keyword[return] keyword[None]
identifier[reduced_approx_matches] =[]
keyword[for] identifier[m] keyword[in] identifier[approx_matches] :
identifier[root] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[m] )[ literal[int] ]
keyword[if] ( identifier[m] == identifier[root] ) keyword[or] ( identifier[root] keyword[not] keyword[in] identifier[approx_matches] ):
identifier[reduced_approx_matches] . identifier[append] ( identifier[m] )
identifier[approx_matches] = identifier[reduced_approx_matches]
keyword[for] identifier[matches] keyword[in] ( identifier[insensitive_matches] , identifier[approx_matches] ):
keyword[if] identifier[len] ( identifier[matches] )== literal[int] :
identifier[logging] . identifier[info] ( literal[string] %(
identifier[program] , identifier[matches] [ literal[int] ]
))
keyword[return] identifier[matches] [ literal[int] ]
keyword[elif] identifier[len] ( identifier[matches] )> literal[int] :
identifier[logging] . identifier[error] (
literal[string] %(
identifier[program] ,
literal[string] . identifier[join] ( identifier[matches] )
)
)
keyword[return] keyword[None]
identifier[logging] . identifier[error] ( literal[string] % identifier[program] )
keyword[return] keyword[None]
|
def findProgram(self, builddir, program):
""" Return the builddir-relative path of program, if only a partial
path is specified. Return None and log an error message if the
program is ambiguous or not found.
"""
# if this is an exact match, do no further checking:
if os.path.isfile(os.path.join(builddir, program)):
logging.info('found %s' % program)
return program # depends on [control=['if'], data=[]]
exact_matches = []
insensitive_matches = []
approx_matches = []
for (path, dirs, files) in os.walk(builddir):
if program in files:
exact_matches.append(os.path.relpath(os.path.join(path, program), builddir))
continue # depends on [control=['if'], data=['program']]
files_lower = [f.lower() for f in files]
if program.lower() in files_lower:
insensitive_matches.append(os.path.relpath(os.path.join(path, files[files_lower.index(program.lower())]), builddir))
continue # depends on [control=['if'], data=['files_lower']]
# !!! TODO: in the future add approximate string matching (typos,
# etc.), for now we just test stripping any paths off program, and
# looking for substring matches:
pg_basen_lower_noext = os.path.splitext(os.path.basename(program).lower())[0]
for f in files_lower:
if pg_basen_lower_noext in f:
approx_matches.append(os.path.relpath(os.path.join(path, files[files_lower.index(f)]), builddir)) # depends on [control=['if'], data=['f']] # depends on [control=['for'], data=['f']] # depends on [control=['for'], data=[]]
if len(exact_matches) == 1:
logging.info('found %s at %s', program, exact_matches[0])
return exact_matches[0] # depends on [control=['if'], data=[]]
elif len(exact_matches) > 1:
logging.error('%s matches multiple executables, please use a full path (one of %s)' % (program, ', or '.join(['"' + os.path.join(m, program) + '"' for m in exact_matches])))
return None # depends on [control=['if'], data=[]]
# if we have matches with and without a file extension, prefer the
# no-file extension version, and discard the others (so we avoid
# picking up post-processed files):
reduced_approx_matches = []
for m in approx_matches:
root = os.path.splitext(m)[0]
if m == root or root not in approx_matches:
reduced_approx_matches.append(m) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['m']]
approx_matches = reduced_approx_matches
for matches in (insensitive_matches, approx_matches):
if len(matches) == 1:
logging.info('found %s at %s' % (program, matches[0]))
return matches[0] # depends on [control=['if'], data=[]]
elif len(matches) > 1:
logging.error('%s is similar to several executables found. Please use an exact name:\n%s' % (program, '\n'.join(matches)))
return None # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['matches']]
logging.error('could not find program "%s" to debug' % program)
return None
|
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but passes all arguments through conditional_escape,
and calls 'mark_safe' on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = dict([(k, conditional_escape(v)) for (k, v) in
six.iteritems(kwargs)])
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
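For a quick check of the contract, Django ships an equivalent django.utils.html.format_html: arguments are escaped, the template markup is not.

# Hedged sketch using Django's own format_html, which has the same contract.
from django.utils.html import format_html

safe = format_html('<b>{}</b>', '<script>x</script>')
print(safe)  # <b>&lt;script&gt;x&lt;/script&gt;</b>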
|
def function[format_html, parameter[format_string]]:
constant[
Similar to str.format, but passes all arguments through conditional_escape,
and calls 'mark_safe' on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
]
variable[args_safe] assign[=] call[name[map], parameter[name[conditional_escape], name[args]]]
variable[kwargs_safe] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da1b0a68550>]]
return[call[name[mark_safe], parameter[call[name[format_string].format, parameter[<ast.Starred object at 0x7da1b0a4c7c0>]]]]]
|
keyword[def] identifier[format_html] ( identifier[format_string] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[args_safe] = identifier[map] ( identifier[conditional_escape] , identifier[args] )
identifier[kwargs_safe] = identifier[dict] ([( identifier[k] , identifier[conditional_escape] ( identifier[v] )) keyword[for] ( identifier[k] , identifier[v] ) keyword[in]
identifier[six] . identifier[iteritems] ( identifier[kwargs] )])
keyword[return] identifier[mark_safe] ( identifier[format_string] . identifier[format] (* identifier[args_safe] ,** identifier[kwargs_safe] ))
|
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but passes all arguments through conditional_escape,
and calls 'mark_safe' on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = dict([(k, conditional_escape(v)) for (k, v) in six.iteritems(kwargs)])
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
|
def items(self, folder_id, subfolder_id, ann_id=None):
'''Yields an unordered generator of items in a subfolder.
The generator yields items, which are represented by a tuple
of ``content_id`` and ``subtopic_id``. The format of these
identifiers is unspecified.
By default (with ``ann_id=None``), items are shown for all
anonymous users. Optionally, ``ann_id`` can be set to a username,
which restricts the list to only items owned by that user.
:param str folder_id: Folder id
:param str subfolder_id: Subfolder id
:param str ann_id: Username
:rtype: generator of ``(content_id, subtopic_id)``
'''
self.assert_valid_folder_id(folder_id)
self.assert_valid_folder_id(subfolder_id)
ann_id = self._annotator(ann_id)
folder_cid = self.wrap_folder_content_id(ann_id, folder_id)
subfolder_sid = self.wrap_subfolder_subtopic_id(subfolder_id)
ident = (folder_cid, subfolder_sid)
if self.store.get(folder_cid) is None:
raise KeyError(folder_id)
for lab in self.label_store.directly_connected(ident):
cid = lab.other(folder_cid)
subid = lab.subtopic_for(cid)
yield (cid, subid)
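A usage sketch, assuming a Folders instance built the same way as elsewhere in this corpus (Folders(web_conf.kvlclient)); the folder and subfolder ids below are hypothetical.

# Hedged sketch: 'my_folder'/'my_subfolder' are hypothetical identifiers.
folders = Folders(kvlclient)  # assumes a configured kvlayer client
for cid, subtopic_id in folders.items('my_folder', 'my_subfolder', ann_id='alice'):
    print(cid, subtopic_id)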
|
def function[items, parameter[self, folder_id, subfolder_id, ann_id]]:
constant[Yields an unordered generator of items in a subfolder.
The generator yields items, which are represented by a tuple
of ``content_id`` and ``subtopic_id``. The format of these
identifiers is unspecified.
By default (with ``ann_id=None``), items are shown for all
anonymous users. Optionally, ``ann_id`` can be set to a username,
which restricts the list to only items owned by that user.
:param str folder_id: Folder id
:param str subfolder_id: Subfolder id
:param str ann_id: Username
:rtype: generator of ``(content_id, subtopic_id)``
]
call[name[self].assert_valid_folder_id, parameter[name[folder_id]]]
call[name[self].assert_valid_folder_id, parameter[name[subfolder_id]]]
variable[ann_id] assign[=] call[name[self]._annotator, parameter[name[ann_id]]]
variable[folder_cid] assign[=] call[name[self].wrap_folder_content_id, parameter[name[ann_id], name[folder_id]]]
variable[subfolder_sid] assign[=] call[name[self].wrap_subfolder_subtopic_id, parameter[name[subfolder_id]]]
variable[ident] assign[=] tuple[[<ast.Name object at 0x7da2041d9060>, <ast.Name object at 0x7da2041d8460>]]
if compare[call[name[self].store.get, parameter[name[folder_cid]]] is constant[None]] begin[:]
<ast.Raise object at 0x7da2041d9de0>
for taget[name[lab]] in starred[call[name[self].label_store.directly_connected, parameter[name[ident]]]] begin[:]
variable[cid] assign[=] call[name[lab].other, parameter[name[folder_cid]]]
variable[subid] assign[=] call[name[lab].subtopic_for, parameter[name[cid]]]
<ast.Yield object at 0x7da2041d9270>
|
keyword[def] identifier[items] ( identifier[self] , identifier[folder_id] , identifier[subfolder_id] , identifier[ann_id] = keyword[None] ):
literal[string]
identifier[self] . identifier[assert_valid_folder_id] ( identifier[folder_id] )
identifier[self] . identifier[assert_valid_folder_id] ( identifier[subfolder_id] )
identifier[ann_id] = identifier[self] . identifier[_annotator] ( identifier[ann_id] )
identifier[folder_cid] = identifier[self] . identifier[wrap_folder_content_id] ( identifier[ann_id] , identifier[folder_id] )
identifier[subfolder_sid] = identifier[self] . identifier[wrap_subfolder_subtopic_id] ( identifier[subfolder_id] )
identifier[ident] =( identifier[folder_cid] , identifier[subfolder_sid] )
keyword[if] identifier[self] . identifier[store] . identifier[get] ( identifier[folder_cid] ) keyword[is] keyword[None] :
keyword[raise] identifier[KeyError] ( identifier[folder_id] )
keyword[for] identifier[lab] keyword[in] identifier[self] . identifier[label_store] . identifier[directly_connected] ( identifier[ident] ):
identifier[cid] = identifier[lab] . identifier[other] ( identifier[folder_cid] )
identifier[subid] = identifier[lab] . identifier[subtopic_for] ( identifier[cid] )
keyword[yield] ( identifier[cid] , identifier[subid] )
|
def items(self, folder_id, subfolder_id, ann_id=None):
"""Yields an unodered generator of items in a subfolder.
The generator yields items, which are represented by a tuple
of ``content_id`` and ``subtopic_id``. The format of these
identifiers is unspecified.
By default (with ``ann_id=None``), items are shown for all
anonymous users. Optionally, ``ann_id`` can be set to a username,
which restricts the list to only items owned by that user.
:param str folder_id: Folder id
:param str subfolder_id: Subfolder id
:param str ann_id: Username
:rtype: generator of ``(content_id, subtopic_id)``
"""
self.assert_valid_folder_id(folder_id)
self.assert_valid_folder_id(subfolder_id)
ann_id = self._annotator(ann_id)
folder_cid = self.wrap_folder_content_id(ann_id, folder_id)
subfolder_sid = self.wrap_subfolder_subtopic_id(subfolder_id)
ident = (folder_cid, subfolder_sid)
if self.store.get(folder_cid) is None:
raise KeyError(folder_id) # depends on [control=['if'], data=[]]
for lab in self.label_store.directly_connected(ident):
cid = lab.other(folder_cid)
subid = lab.subtopic_for(cid)
yield (cid, subid) # depends on [control=['for'], data=['lab']]
|
def op_paths(self, path_base=None):
# type: (Union[str, UrlPath]) -> Generator[Tuple[UrlPath, Operation]]
"""
Return all operations stored in containers.
"""
if path_base:
path_base += self.path_prefix
else:
path_base = self.path_prefix or UrlPath()
for container in self.containers:
for op_path in container.op_paths(path_base):
yield op_path
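A minimal sketch of the recursion with stand-in containers; Leaf, Group and the str-based UrlPath below are illustrative, not the library's API.

# Hedged sketch: any object with an op_paths(path_base) generator can act
# as a container; Group reuses the module-level op_paths defined above.
class UrlPath(str):
    def __add__(self, other):
        return UrlPath(str(self) + str(other))

class Leaf(object):
    def __init__(self, path):
        self.path = path
    def op_paths(self, path_base):
        yield path_base + self.path

class Group(object):
    def __init__(self, path_prefix, containers):
        self.path_prefix = path_prefix
        self.containers = containers
    op_paths = op_paths  # bind the generator defined above as a method

api = Group(UrlPath('/api'), [Leaf('/users'), Leaf('/orders')])
print(list(api.op_paths()))  # ['/api/users', '/api/orders']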
|
def function[op_paths, parameter[self, path_base]]:
constant[
Return all operations stored in containers.
]
if name[path_base] begin[:]
<ast.AugAssign object at 0x7da1b0a70910>
for taget[name[container]] in starred[name[self].containers] begin[:]
for taget[name[op_path]] in starred[call[name[container].op_paths, parameter[name[path_base]]]] begin[:]
<ast.Yield object at 0x7da204960190>
|
keyword[def] identifier[op_paths] ( identifier[self] , identifier[path_base] = keyword[None] ):
literal[string]
keyword[if] identifier[path_base] :
identifier[path_base] += identifier[self] . identifier[path_prefix]
keyword[else] :
identifier[path_base] = identifier[self] . identifier[path_prefix] keyword[or] identifier[UrlPath] ()
keyword[for] identifier[container] keyword[in] identifier[self] . identifier[containers] :
keyword[for] identifier[op_path] keyword[in] identifier[container] . identifier[op_paths] ( identifier[path_base] ):
keyword[yield] identifier[op_path]
|
def op_paths(self, path_base=None):
# type: (Union[str, UrlPath]) -> Generator[Tuple[UrlPath, Operation]]
'\n Return all operations stored in containers.\n '
if path_base:
path_base += self.path_prefix # depends on [control=['if'], data=[]]
else:
path_base = self.path_prefix or UrlPath()
for container in self.containers:
for op_path in container.op_paths(path_base):
yield op_path # depends on [control=['for'], data=['op_path']] # depends on [control=['for'], data=['container']]
|
def append_headers(f):
"""
Appends all the web headers:
* ZSL version and information,
* default CORS if not already set up,
* cache.
:param f: The decorated function.
:return: The function which appends the web headers.
"""
@wraps(f)
def _response_decorator(*args, **kwargs):
r = f(*args, **kwargs)
response = r if isinstance(r, Response) else make_response(r)
append_all(response)
return response
return _response_decorator
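A usage sketch on a Flask-style view; the app object and route are illustrative.

# Hedged sketch: stack under the routing decorator so every response
# from this view passes through append_all.
@app.route('/status')  # assumes an existing Flask app
@append_headers
def status():
    return 'ok'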
|
def function[append_headers, parameter[f]]:
constant[
Appends all the web headers:
* ZSL version and information,
* default CORS if not already set up,
* cache.
:param f: The decorated function.
:return: The function which appends the web headers.
]
def function[_response_decorator, parameter[]]:
variable[r] assign[=] call[name[f], parameter[<ast.Starred object at 0x7da207f989a0>]]
variable[response] assign[=] <ast.IfExp object at 0x7da207f99c30>
call[name[append_all], parameter[name[response]]]
return[name[response]]
return[name[_response_decorator]]
|
keyword[def] identifier[append_headers] ( identifier[f] ):
literal[string]
@ identifier[wraps] ( identifier[f] )
keyword[def] identifier[_response_decorator] (* identifier[args] ,** identifier[kwargs] ):
identifier[r] = identifier[f] (* identifier[args] ,** identifier[kwargs] )
identifier[response] = identifier[r] keyword[if] identifier[isinstance] ( identifier[r] , identifier[Response] ) keyword[else] identifier[make_response] ( identifier[r] )
identifier[append_all] ( identifier[response] )
keyword[return] identifier[response]
keyword[return] identifier[_response_decorator]
|
def append_headers(f):
"""
Appends all the web headers:
* ZSL version and information,
* default CORS if not already set up,
* cache.
:param f: The decorated function.
:return: The function which appends the web headers.
"""
@wraps(f)
def _response_decorator(*args, **kwargs):
r = f(*args, **kwargs)
response = r if isinstance(r, Response) else make_response(r)
append_all(response)
return response
return _response_decorator
|
def _set_edge_loop_detection(self, v, load=False):
"""
Setter method for edge_loop_detection, mapped from YANG variable /interface/tengigabitethernet/edge_loop_detection (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_edge_loop_detection is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_edge_loop_detection() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=edge_loop_detection.edge_loop_detection, is_container='container', presence=False, yang_name="edge-loop-detection", rest_name="edge-loop-detection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable edge-loop-detection on the selected interface', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_ELD', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """edge_loop_detection must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=edge_loop_detection.edge_loop_detection, is_container='container', presence=False, yang_name="edge-loop-detection", rest_name="edge-loop-detection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable edge-loop-detection on the selected interface', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_ELD', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__edge_loop_detection = t
if hasattr(self, '_set'):
self._set()
|
def function[_set_edge_loop_detection, parameter[self, v, load]]:
constant[
Setter method for edge_loop_detection, mapped from YANG variable /interface/tengigabitethernet/edge_loop_detection (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_edge_loop_detection is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_edge_loop_detection() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da20c6c5780>
name[self].__edge_loop_detection assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]]
|
keyword[def] identifier[_set_edge_loop_detection] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[edge_loop_detection] . identifier[edge_loop_detection] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__edge_loop_detection] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] ()
|
def _set_edge_loop_detection(self, v, load=False):
"""
Setter method for edge_loop_detection, mapped from YANG variable /interface/tengigabitethernet/edge_loop_detection (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_edge_loop_detection is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_edge_loop_detection() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=edge_loop_detection.edge_loop_detection, is_container='container', presence=False, yang_name='edge-loop-detection', rest_name='edge-loop-detection', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable edge-loop-detection on the selected interface', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_ELD', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'edge_loop_detection must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=edge_loop_detection.edge_loop_detection, is_container=\'container\', presence=False, yang_name="edge-loop-detection", rest_name="edge-loop-detection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Enable edge-loop-detection on the selected interface\', u\'sort-priority\': u\'RUNNCFG_INTERFACE_LEVEL_ELD\', u\'display-when\': u\'/vcsmode/vcs-mode = "true"\', u\'cli-incomplete-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-interface\', defining_module=\'brocade-interface\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__edge_loop_detection = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]]
|
def download_all(self, dst_dir=None):
"""Download all available files.
Arguments:
dst_dir -- Optional destination directory to write files to. If not
specified, then files are downloaded to the current directory.
Return:
Dictionary of {file_name: file_size, ..}
"""
saved = {}
save_as = None
for f in self.files():
if dst_dir:
save_as = os.path.join(dst_dir, f.split('/')[-1])
name, bytes = self.download(f, save_as)
saved[name] = bytes
return saved
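A usage sketch; client stands in for a hypothetical instance of the surrounding class.

# Hedged sketch: download everything into one directory and report sizes
# (assumes the destination directory already exists).
saved = client.download_all(dst_dir='/tmp/exports')
for name, size in sorted(saved.items()):
    print('%s: %d bytes' % (name, size))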
|
def function[download_all, parameter[self, dst_dir]]:
constant[Download all available files.
Arguments:
dst_dir -- Optional destination directory to write files to. If not
specified, then files are downloaded to the current directory.
Return:
Dictionary of {file_name: file_size, ..}
]
variable[saved] assign[=] dictionary[[], []]
variable[save_as] assign[=] constant[None]
for taget[name[f]] in starred[call[name[self].files, parameter[]]] begin[:]
if name[dst_dir] begin[:]
variable[save_as] assign[=] call[name[os].path.join, parameter[name[dst_dir], call[call[name[f].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da1b10c1300>]]]
<ast.Tuple object at 0x7da1b10c1060> assign[=] call[name[self].download, parameter[name[f], name[save_as]]]
call[name[saved]][name[name]] assign[=] name[bytes]
return[name[saved]]
|
keyword[def] identifier[download_all] ( identifier[self] , identifier[dst_dir] = keyword[None] ):
literal[string]
identifier[saved] ={}
identifier[save_as] = keyword[None]
keyword[for] identifier[f] keyword[in] identifier[self] . identifier[files] ():
keyword[if] identifier[dst_dir] :
identifier[save_as] = identifier[os] . identifier[path] . identifier[join] ( identifier[dst_dir] , identifier[f] . identifier[split] ( literal[string] )[- literal[int] ])
identifier[name] , identifier[bytes] = identifier[self] . identifier[download] ( identifier[f] , identifier[save_as] )
identifier[saved] [ identifier[name] ]= identifier[bytes]
keyword[return] identifier[saved]
|
def download_all(self, dst_dir=None):
"""Download all available files.
Arguments:
dst_dir -- Optional destination directory to write files to. If not
specified, then files are downloaded to the current directory.
Return:
Dictionary of {file_name: file_size, ..}
"""
saved = {}
save_as = None
for f in self.files():
if dst_dir:
save_as = os.path.join(dst_dir, f.split('/')[-1]) # depends on [control=['if'], data=[]]
(name, bytes) = self.download(f, save_as)
saved[name] = bytes # depends on [control=['for'], data=['f']]
return saved
|
def fake_me(cls, source):
"""
fake_me
Class or method decorator
Class decorator: create temporary table for all tests in SimpleTestCase.
Method decorator: create temporary model only for given test method.
:param source: SimpleTestCase or test function
:return:
"""
if source and type(source) == type and issubclass(source, SimpleTestCase):
return cls._class_extension(source)
elif hasattr(source, '__call__'):
return cls._decorator(source)
else:
raise AttributeError('source - must be a SimpleTestCase subclass or a function')
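A usage sketch in both decorator positions; MyFakeModel is a hypothetical model class exposing fake_me.

# Hedged sketch: the temporary table's lifetime follows the decorated scope.
@MyFakeModel.fake_me
class WholeCaseTests(SimpleTestCase):
    def test_one(self):
        MyFakeModel.objects.create(name='x')

class SingleMethodTests(SimpleTestCase):
    @MyFakeModel.fake_me
    def test_two(self):
        MyFakeModel.objects.create(name='y')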
|
def function[fake_me, parameter[cls, source]]:
constant[
fake_me
Class or method decorator
Class decorator: create temporary table for all tests in SimpleTestCase.
Method decorator: create temporary model only for given test method.
:param source: SimpleTestCase or test function
:return:
]
if <ast.BoolOp object at 0x7da20e955960> begin[:]
return[call[name[cls]._class_extension, parameter[name[source]]]]
|
keyword[def] identifier[fake_me] ( identifier[cls] , identifier[source] ):
literal[string]
keyword[if] identifier[source] keyword[and] identifier[type] ( identifier[source] )== identifier[type] keyword[and] identifier[issubclass] ( identifier[source] , identifier[SimpleTestCase] ):
keyword[return] identifier[cls] . identifier[_class_extension] ( identifier[source] )
keyword[elif] identifier[hasattr] ( identifier[source] , literal[string] ):
keyword[return] identifier[cls] . identifier[_decorator] ( identifier[source] )
keyword[else] :
keyword[raise] identifier[AttributeError] ( literal[string] )
|
def fake_me(cls, source):
"""
fake_me
Class or method decorator
Class decorator: create temporary table for all tests in SimpleTestCase.
Method decorator: create temporary model only for given test method.
:param source: SimpleTestCase or test function
:return:
"""
if source and type(source) == type and issubclass(source, SimpleTestCase):
return cls._class_extension(source) # depends on [control=['if'], data=[]]
elif hasattr(source, '__call__'):
return cls._decorator(source) # depends on [control=['if'], data=[]]
else:
raise AttributeError('source - must be a SimpleTestCase subclass or a function')
|
def _authorization_headers_valid(self, token_type, token):
"""Verify authorization headers for a request.
Parameters
token_type (str)
Type of token to access resources.
token (str)
Server Token or OAuth 2.0 Access Token.
Returns
(bool)
True iff token_type and token are valid.
"""
if token_type not in http.VALID_TOKEN_TYPES:
return False
allowed_chars = ascii_letters + digits + '_' + '-' + '=' + '/' + '+'
# True if token only contains allowed_chars
return all(characters in allowed_chars for characters in token)
|
def function[_authorization_headers_valid, parameter[self, token_type, token]]:
constant[Verify authorization headers for a request.
Parameters
token_type (str)
Type of token to access resources.
token (str)
Server Token or OAuth 2.0 Access Token.
Returns
(bool)
True iff token_type and token are valid.
]
if compare[name[token_type] <ast.NotIn object at 0x7da2590d7190> name[http].VALID_TOKEN_TYPES] begin[:]
return[constant[False]]
variable[allowed_chars] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[ascii_letters] + name[digits]] + constant[_]] + constant[-]] + constant[=]] + constant[/]] + constant[+]]
return[call[name[all], parameter[<ast.GeneratorExp object at 0x7da2054a4dc0>]]]
|
keyword[def] identifier[_authorization_headers_valid] ( identifier[self] , identifier[token_type] , identifier[token] ):
literal[string]
keyword[if] identifier[token_type] keyword[not] keyword[in] identifier[http] . identifier[VALID_TOKEN_TYPES] :
keyword[return] keyword[False]
identifier[allowed_chars] = identifier[ascii_letters] + identifier[digits] + literal[string] + literal[string] + literal[string] + literal[string] + literal[string]
keyword[return] identifier[all] ( identifier[characters] keyword[in] identifier[allowed_chars] keyword[for] identifier[characters] keyword[in] identifier[token] )
|
def _authorization_headers_valid(self, token_type, token):
"""Verify authorization headers for a request.
Parameters
token_type (str)
Type of token to access resources.
token (str)
Server Token or OAuth 2.0 Access Token.
Returns
(bool)
True iff token_type and token are valid.
"""
if token_type not in http.VALID_TOKEN_TYPES:
return False # depends on [control=['if'], data=[]]
allowed_chars = ascii_letters + digits + '_' + '-' + '=' + '/' + '+'
# True if token only contains allowed_chars
return all((characters in allowed_chars for characters in token))
|
def rowcount(self):
"""
This read-only attribute specifies the number of rows that the last
.execute*() produced (for DQL statements like ``SELECT``) or affected
(for DML statements like ``UPDATE`` or ``INSERT``).
"""
if (self._closed or not self._result or "rows" not in self._result):
return -1
return self._result.get("rowcount", -1)
|
def function[rowcount, parameter[self]]:
constant[
This read-only attribute specifies the number of rows that the last
.execute*() produced (for DQL statements like ``SELECT``) or affected
(for DML statements like ``UPDATE`` or ``INSERT``).
]
if <ast.BoolOp object at 0x7da1b101b3d0> begin[:]
return[<ast.UnaryOp object at 0x7da1b101a8f0>]
return[call[name[self]._result.get, parameter[constant[rowcount], <ast.UnaryOp object at 0x7da1b2346b30>]]]
|
keyword[def] identifier[rowcount] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[_closed] keyword[or] keyword[not] identifier[self] . identifier[_result] keyword[or] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_result] ):
keyword[return] - literal[int]
keyword[return] identifier[self] . identifier[_result] . identifier[get] ( literal[string] ,- literal[int] )
|
def rowcount(self):
"""
This read-only attribute specifies the number of rows that the last
.execute*() produced (for DQL statements like ``SELECT``) or affected
(for DML statements like ``UPDATE`` or ``INSERT``).
"""
if self._closed or not self._result or 'rows' not in self._result:
return -1 # depends on [control=['if'], data=[]]
return self._result.get('rowcount', -1)
|
def parse_shape(x, pattern: str):
"""
Parse a tensor shape to dictionary mapping axes names to their lengths.
Use underscore to skip the dimension in parsing.
>>> x = np.zeros([2, 3, 5, 7])
>>> parse_shape(x, 'batch _ h w')
{'batch': 2, 'h': 5, 'w': 7}
parse_shape output can be used to specify axes_lengths for other operations
>>> y = np.zeros([700])
>>> rearrange(y, '(b c h w) -> b c h w', **parse_shape(x, 'b _ h w')).shape
(2, 10, 5, 7)
For symbolic frameworks, this may return symbols, not integers.
:param x: tensor of any of supported frameworks
:param pattern: str, space separated names for axes, underscore means skip axis
:return: dict, maps axes names to their lengths
"""
names = [elementary_axis for elementary_axis in pattern.split(' ') if len(elementary_axis) > 0]
shape = get_backend(x).shape(x)
if len(shape) != len(names):
raise RuntimeError("Can't parse shape with different number of dimensions: {pattern} {shape}".format(
pattern=pattern, shape=shape))
result = {}
for axis_name, axis_length in zip(names, shape):
if axis_name != '_':
result[axis_name] = axis_length
return result
|
def function[parse_shape, parameter[x, pattern]]:
constant[
Parse a tensor shape to dictionary mapping axes names to their lengths.
Use underscore to skip the dimension in parsing.
>>> x = np.zeros([2, 3, 5, 7])
>>> parse_shape(x, 'batch _ h w')
{'batch': 2, 'h': 5, 'w': 7}
parse_shape output can be used to specify axes_lengths for other operations
>>> y = np.zeros([700])
>>> rearrange(y, '(b c h w) -> b c h w', **parse_shape(x, 'b _ h w')).shape
(2, 10, 5, 7)
For symbolic frameworks, this may return symbols, not integers.
:param x: tensor of any of supported frameworks
:param pattern: str, space separated names for axes, underscore means skip axis
:return: dict, maps axes names to their lengths
]
variable[names] assign[=] <ast.ListComp object at 0x7da20c76fc10>
variable[shape] assign[=] call[call[name[get_backend], parameter[name[x]]].shape, parameter[name[x]]]
if compare[call[name[len], parameter[name[shape]]] not_equal[!=] call[name[len], parameter[name[names]]]] begin[:]
<ast.Raise object at 0x7da20c76e050>
variable[result] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c76d840>, <ast.Name object at 0x7da20c76cca0>]]] in starred[call[name[zip], parameter[name[names], name[shape]]]] begin[:]
if compare[name[axis_name] not_equal[!=] constant[_]] begin[:]
call[name[result]][name[axis_name]] assign[=] name[axis_length]
return[name[result]]
|
keyword[def] identifier[parse_shape] ( identifier[x] , identifier[pattern] : identifier[str] ):
literal[string]
identifier[names] =[ identifier[elementary_axis] keyword[for] identifier[elementary_axis] keyword[in] identifier[pattern] . identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[elementary_axis] )> literal[int] ]
identifier[shape] = identifier[get_backend] ( identifier[x] ). identifier[shape] ( identifier[x] )
keyword[if] identifier[len] ( identifier[shape] )!= identifier[len] ( identifier[names] ):
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] (
identifier[pattern] = identifier[pattern] , identifier[shape] = identifier[shape] ))
identifier[result] ={}
keyword[for] identifier[axis_name] , identifier[axis_length] keyword[in] identifier[zip] ( identifier[names] , identifier[shape] ):
keyword[if] identifier[axis_name] != literal[string] :
identifier[result] [ identifier[axis_name] ]= identifier[axis_length]
keyword[return] identifier[result]
|
def parse_shape(x, pattern: str):
"""
Parse a tensor shape to dictionary mapping axes names to their lengths.
Use underscore to skip the dimension in parsing.
>>> x = np.zeros([2, 3, 5, 7])
>>> parse_shape(x, 'batch _ h w')
{'batch': 2, 'h': 5, 'w': 7}
parse_shape output can be used to specify axes_lengths for other operations
>>> y = np.zeros([700])
>>> rearrange(y, '(b c h w) -> b c h w', **parse_shape(x, 'b _ h w')).shape
(2, 10, 5, 7)
For symbolic frameworks, this may return symbols, not integers.
:param x: tensor of any of supported frameworks
:param pattern: str, space separated names for axes, underscore means skip axis
:return: dict, maps axes names to their lengths
"""
names = [elementary_axis for elementary_axis in pattern.split(' ') if len(elementary_axis) > 0]
shape = get_backend(x).shape(x)
if len(shape) != len(names):
raise RuntimeError("Can't parse shape with different number of dimensions: {pattern} {shape}".format(pattern=pattern, shape=shape)) # depends on [control=['if'], data=[]]
result = {}
for (axis_name, axis_length) in zip(names, shape):
if axis_name != '_':
result[axis_name] = axis_length # depends on [control=['if'], data=['axis_name']] # depends on [control=['for'], data=[]]
return result
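
A minimal usage sketch of the same parsing loop against a plain numpy array, assuming only that get_backend(x).shape(x) reduces to x.shape for numpy; parse_shape_demo is an illustrative name, not part of the library.

import numpy as np

def parse_shape_demo(x, pattern):
    # Same logic as above, minus the backend dispatch.
    names = [axis for axis in pattern.split(' ') if axis]
    if len(x.shape) != len(names):
        raise RuntimeError("Can't parse shape: {} {}".format(pattern, x.shape))
    return {name: size for name, size in zip(names, x.shape) if name != '_'}

print(parse_shape_demo(np.zeros([2, 3, 5, 7]), 'batch _ h w'))
# {'batch': 2, 'h': 5, 'w': 7}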
|
def _run_query(self, query):
"""
Run one query against BigQuery and return the result.
:param query: the query to run
:type query: str
:return: list of per-row response dicts (key => value)
:rtype: ``list``
"""
query_request = self.service.jobs()
logger.debug('Running query: %s', query)
start = datetime.now()
resp = query_request.query(
projectId=self.project_id, body={'query': query}
).execute()
duration = datetime.now() - start
logger.debug('Query response (in %s): %s', duration, resp)
if not resp['jobComplete']:
logger.error('Error: query reported job not complete!')
if int(resp['totalRows']) == 0:
return []
if int(resp['totalRows']) != len(resp['rows']):
logger.error('Error: query reported %s total rows, but only '
'returned %d', resp['totalRows'], len(resp['rows']))
data = []
fields = [f['name'] for f in resp['schema']['fields']]
for row in resp['rows']:
d = {}
for idx, val in enumerate(row['f']):
d[fields[idx]] = val['v']
data.append(d)
return data
|
def function[_run_query, parameter[self, query]]:
constant[
Run one query against BigQuery and return the result.
:param query: the query to run
:type query: str
:return: list of per-row response dicts (key => value)
:rtype: ``list``
]
variable[query_request] assign[=] call[name[self].service.jobs, parameter[]]
call[name[logger].debug, parameter[constant[Running query: %s], name[query]]]
variable[start] assign[=] call[name[datetime].now, parameter[]]
variable[resp] assign[=] call[call[name[query_request].query, parameter[]].execute, parameter[]]
variable[duration] assign[=] binary_operation[call[name[datetime].now, parameter[]] - name[start]]
call[name[logger].debug, parameter[constant[Query response (in %s): %s], name[duration], name[resp]]]
if <ast.UnaryOp object at 0x7da18f09f190> begin[:]
call[name[logger].error, parameter[constant[Error: query reported job not complete!]]]
if compare[call[name[int], parameter[call[name[resp]][constant[totalRows]]]] equal[==] constant[0]] begin[:]
return[list[[]]]
if compare[call[name[int], parameter[call[name[resp]][constant[totalRows]]]] not_equal[!=] call[name[len], parameter[call[name[resp]][constant[rows]]]]] begin[:]
call[name[logger].error, parameter[constant[Error: query reported %s total rows, but only returned %d], call[name[resp]][constant[totalRows]], call[name[len], parameter[call[name[resp]][constant[rows]]]]]]
variable[data] assign[=] list[[]]
variable[fields] assign[=] <ast.ListComp object at 0x7da18f09d330>
for taget[name[row]] in starred[call[name[resp]][constant[rows]]] begin[:]
variable[d] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da2041da6b0>, <ast.Name object at 0x7da2041d9690>]]] in starred[call[name[enumerate], parameter[call[name[row]][constant[f]]]]] begin[:]
call[name[d]][call[name[fields]][name[idx]]] assign[=] call[name[val]][constant[v]]
call[name[data].append, parameter[name[d]]]
return[name[data]]
|
keyword[def] identifier[_run_query] ( identifier[self] , identifier[query] ):
literal[string]
identifier[query_request] = identifier[self] . identifier[service] . identifier[jobs] ()
identifier[logger] . identifier[debug] ( literal[string] , identifier[query] )
identifier[start] = identifier[datetime] . identifier[now] ()
identifier[resp] = identifier[query_request] . identifier[query] (
identifier[projectId] = identifier[self] . identifier[project_id] , identifier[body] ={ literal[string] : identifier[query] }
). identifier[execute] ()
identifier[duration] = identifier[datetime] . identifier[now] ()- identifier[start]
identifier[logger] . identifier[debug] ( literal[string] , identifier[duration] , identifier[resp] )
keyword[if] keyword[not] identifier[resp] [ literal[string] ]:
identifier[logger] . identifier[error] ( literal[string] )
keyword[if] identifier[int] ( identifier[resp] [ literal[string] ])== literal[int] :
keyword[return] []
keyword[if] identifier[int] ( identifier[resp] [ literal[string] ])!= identifier[len] ( identifier[resp] [ literal[string] ]):
identifier[logger] . identifier[error] ( literal[string]
literal[string] , identifier[resp] [ literal[string] ], identifier[len] ( identifier[resp] [ literal[string] ]))
identifier[data] =[]
identifier[fields] =[ identifier[f] [ literal[string] ] keyword[for] identifier[f] keyword[in] identifier[resp] [ literal[string] ][ literal[string] ]]
keyword[for] identifier[row] keyword[in] identifier[resp] [ literal[string] ]:
identifier[d] ={}
keyword[for] identifier[idx] , identifier[val] keyword[in] identifier[enumerate] ( identifier[row] [ literal[string] ]):
identifier[d] [ identifier[fields] [ identifier[idx] ]]= identifier[val] [ literal[string] ]
identifier[data] . identifier[append] ( identifier[d] )
keyword[return] identifier[data]
|
def _run_query(self, query):
"""
Run one query against BigQuery and return the result.
:param query: the query to run
:type query: str
:return: list of per-row response dicts (key => value)
:rtype: ``list``
"""
query_request = self.service.jobs()
logger.debug('Running query: %s', query)
start = datetime.now()
resp = query_request.query(projectId=self.project_id, body={'query': query}).execute()
duration = datetime.now() - start
logger.debug('Query response (in %s): %s', duration, resp)
if not resp['jobComplete']:
logger.error('Error: query reported job not complete!') # depends on [control=['if'], data=[]]
if int(resp['totalRows']) == 0:
return [] # depends on [control=['if'], data=[]]
if int(resp['totalRows']) != len(resp['rows']):
logger.error('Error: query reported %s total rows, but only returned %d', resp['totalRows'], len(resp['rows'])) # depends on [control=['if'], data=[]]
data = []
fields = [f['name'] for f in resp['schema']['fields']]
for row in resp['rows']:
d = {}
for (idx, val) in enumerate(row['f']):
d[fields[idx]] = val['v'] # depends on [control=['for'], data=[]]
data.append(d) # depends on [control=['for'], data=['row']]
return data
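
To make the row-decoding loop concrete, here is the same zip over a hand-written response in the shape BigQuery's jobs().query() returns (schema.fields, rows[].f[].v); the field names and values are invented.

resp = {'schema': {'fields': [{'name': 'user'}, {'name': 'count'}]},
        'rows': [{'f': [{'v': 'alice'}, {'v': '3'}]},
                 {'f': [{'v': 'bob'}, {'v': '7'}]}]}
fields = [f['name'] for f in resp['schema']['fields']]
data = [{fields[idx]: val['v'] for idx, val in enumerate(row['f'])}
        for row in resp['rows']]
print(data)  # [{'user': 'alice', 'count': '3'}, {'user': 'bob', 'count': '7'}]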
|
def potential_from_grid(self, grid):
"""
Calculate the potential at a given set of arc-second gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
        The grid of (y,x) arc-second coordinates the potential is computed on.
"""
potential_grid = quad_grid(self.potential_func, 0.0, 1.0, grid,
args=(self.axis_ratio, self.slope, self.core_radius))[0]
return self.einstein_radius_rescaled * self.axis_ratio * potential_grid
|
def function[potential_from_grid, parameter[self, grid]]:
constant[
Calculate the potential at a given set of arc-second gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
    The grid of (y,x) arc-second coordinates the potential is computed on.
]
variable[potential_grid] assign[=] call[call[name[quad_grid], parameter[name[self].potential_func, constant[0.0], constant[1.0], name[grid]]]][constant[0]]
return[binary_operation[binary_operation[name[self].einstein_radius_rescaled * name[self].axis_ratio] * name[potential_grid]]]
|
keyword[def] identifier[potential_from_grid] ( identifier[self] , identifier[grid] ):
literal[string]
identifier[potential_grid] = identifier[quad_grid] ( identifier[self] . identifier[potential_func] , literal[int] , literal[int] , identifier[grid] ,
identifier[args] =( identifier[self] . identifier[axis_ratio] , identifier[self] . identifier[slope] , identifier[self] . identifier[core_radius] ))[ literal[int] ]
keyword[return] identifier[self] . identifier[einstein_radius_rescaled] * identifier[self] . identifier[axis_ratio] * identifier[potential_grid]
|
def potential_from_grid(self, grid):
"""
Calculate the potential at a given set of arc-second gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
        The grid of (y,x) arc-second coordinates the potential is computed on.
"""
potential_grid = quad_grid(self.potential_func, 0.0, 1.0, grid, args=(self.axis_ratio, self.slope, self.core_radius))[0]
return self.einstein_radius_rescaled * self.axis_ratio * potential_grid
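
quad_grid is project-specific and not shown here; a plausible reading, sketched below as an assumption rather than its real implementation, is a per-point wrapper around scipy's quad that integrates the integrand from 0 to 1 at every (y, x) grid coordinate.

import numpy as np
from scipy.integrate import quad

def quad_grid_demo(func, a, b, grid, args=()):
    # One 1D quadrature per (y, x) point; quad returns (value, abserr).
    return np.array([quad(func, a, b, args=(y, x) + args)[0] for y, x in grid])

grid = np.array([[0.0, 1.0], [1.0, 1.0]])
print(quad_grid_demo(lambda u, y, x: u * (y + x), 0.0, 1.0, grid))  # [0.5 1. ]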
|
def node_to_elem(root):
"""
Convert (recursively) a Node object into an ElementTree object.
"""
def generate_elem(append, node, level):
var = "e" + str(level)
arg = repr(node.tag)
if node.attrib:
arg += ", **%r" % node.attrib
if level == 1:
append("e1 = Element(%s)" % arg)
else:
append("%s = SubElement(e%d, %s)" % (var, level - 1, arg))
if not node.nodes:
append("%s.text = %r" % (var, node.text))
for x in node:
generate_elem(append, x, level + 1)
# generate code to create a tree
output = []
generate_elem(output.append, root, 1) # print "\n".join(output)
namespace = {"Element": ElementTree.Element,
"SubElement": ElementTree.SubElement}
exec("\n".join(output), globals(), namespace)
return namespace["e1"]
|
def function[node_to_elem, parameter[root]]:
constant[
Convert (recursively) a Node object into an ElementTree object.
]
def function[generate_elem, parameter[append, node, level]]:
variable[var] assign[=] binary_operation[constant[e] + call[name[str], parameter[name[level]]]]
variable[arg] assign[=] call[name[repr], parameter[name[node].tag]]
if name[node].attrib begin[:]
<ast.AugAssign object at 0x7da207f03f40>
if compare[name[level] equal[==] constant[1]] begin[:]
call[name[append], parameter[binary_operation[constant[e1 = Element(%s)] <ast.Mod object at 0x7da2590d6920> name[arg]]]]
if <ast.UnaryOp object at 0x7da207f00be0> begin[:]
call[name[append], parameter[binary_operation[constant[%s.text = %r] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da207f023e0>, <ast.Attribute object at 0x7da207f01de0>]]]]]
for taget[name[x]] in starred[name[node]] begin[:]
call[name[generate_elem], parameter[name[append], name[x], binary_operation[name[level] + constant[1]]]]
variable[output] assign[=] list[[]]
call[name[generate_elem], parameter[name[output].append, name[root], constant[1]]]
variable[namespace] assign[=] dictionary[[<ast.Constant object at 0x7da207f004f0>, <ast.Constant object at 0x7da207f00ee0>], [<ast.Attribute object at 0x7da207f020e0>, <ast.Attribute object at 0x7da207f02f20>]]
call[name[exec], parameter[call[constant[
].join, parameter[name[output]]], call[name[globals], parameter[]], name[namespace]]]
return[call[name[namespace]][constant[e1]]]
|
keyword[def] identifier[node_to_elem] ( identifier[root] ):
literal[string]
keyword[def] identifier[generate_elem] ( identifier[append] , identifier[node] , identifier[level] ):
identifier[var] = literal[string] + identifier[str] ( identifier[level] )
identifier[arg] = identifier[repr] ( identifier[node] . identifier[tag] )
keyword[if] identifier[node] . identifier[attrib] :
identifier[arg] += literal[string] % identifier[node] . identifier[attrib]
keyword[if] identifier[level] == literal[int] :
identifier[append] ( literal[string] % identifier[arg] )
keyword[else] :
identifier[append] ( literal[string] %( identifier[var] , identifier[level] - literal[int] , identifier[arg] ))
keyword[if] keyword[not] identifier[node] . identifier[nodes] :
identifier[append] ( literal[string] %( identifier[var] , identifier[node] . identifier[text] ))
keyword[for] identifier[x] keyword[in] identifier[node] :
identifier[generate_elem] ( identifier[append] , identifier[x] , identifier[level] + literal[int] )
identifier[output] =[]
identifier[generate_elem] ( identifier[output] . identifier[append] , identifier[root] , literal[int] )
identifier[namespace] ={ literal[string] : identifier[ElementTree] . identifier[Element] ,
literal[string] : identifier[ElementTree] . identifier[SubElement] }
identifier[exec] ( literal[string] . identifier[join] ( identifier[output] ), identifier[globals] (), identifier[namespace] )
keyword[return] identifier[namespace] [ literal[string] ]
|
def node_to_elem(root):
"""
Convert (recursively) a Node object into an ElementTree object.
"""
def generate_elem(append, node, level):
var = 'e' + str(level)
arg = repr(node.tag)
if node.attrib:
arg += ', **%r' % node.attrib # depends on [control=['if'], data=[]]
if level == 1:
append('e1 = Element(%s)' % arg) # depends on [control=['if'], data=[]]
else:
append('%s = SubElement(e%d, %s)' % (var, level - 1, arg))
if not node.nodes:
append('%s.text = %r' % (var, node.text)) # depends on [control=['if'], data=[]]
for x in node:
generate_elem(append, x, level + 1) # depends on [control=['for'], data=['x']]
# generate code to create a tree
output = []
generate_elem(output.append, root, 1) # print "\n".join(output)
namespace = {'Element': ElementTree.Element, 'SubElement': ElementTree.SubElement}
exec('\n'.join(output), globals(), namespace)
return namespace['e1']
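
A round-trip check with a bare-bones stand-in for the Node type (only the tag, attrib, text, nodes attributes and the iteration that node_to_elem touches); the snippet above also assumes ElementTree was imported from xml.etree at module level.

from xml.etree import ElementTree

class Node:
    def __init__(self, tag, attrib=None, text=None, nodes=()):
        self.tag, self.attrib = tag, attrib or {}
        self.text, self.nodes = text, list(nodes)
    def __iter__(self):
        return iter(self.nodes)

root = Node('config', nodes=[Node('item', {'id': '1'}, text='hello')])
print(ElementTree.tostring(node_to_elem(root)))
# b'<config><item id="1">hello</item></config>'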
|
def _write_gppu(self, gppu=None):
"""Write the specified byte value to the GPPU registor. If no value
specified the current buffered value will be written.
"""
if gppu is not None:
self.gppu = gppu
self.i2c.write_list(self.GPPU, self.gppu)
|
def function[_write_gppu, parameter[self, gppu]]:
    constant[Write the specified byte value to the GPPU register. If no value
    is specified, the current buffered value will be written.
]
if compare[name[gppu] is_not constant[None]] begin[:]
name[self].gppu assign[=] name[gppu]
call[name[self].i2c.write_list, parameter[name[self].GPPU, name[self].gppu]]
|
keyword[def] identifier[_write_gppu] ( identifier[self] , identifier[gppu] = keyword[None] ):
literal[string]
keyword[if] identifier[gppu] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[gppu] = identifier[gppu]
identifier[self] . identifier[i2c] . identifier[write_list] ( identifier[self] . identifier[GPPU] , identifier[self] . identifier[gppu] )
|
def _write_gppu(self, gppu=None):
"""Write the specified byte value to the GPPU registor. If no value
specified the current buffered value will be written.
"""
if gppu is not None:
self.gppu = gppu # depends on [control=['if'], data=['gppu']]
self.i2c.write_list(self.GPPU, self.gppu)
|
def write_methods(self):
"""
Write all methods in the current module to a byte string.
:return: A bytestring of all current methods in this module
    :rtype: bytearray
"""
b_array = bytearray()
for key, vm_token in self.all_vm_tokens.items():
b_array.append(vm_token.out_op)
if vm_token.data is not None and vm_token.vm_op != VMOp.NOP:
b_array = b_array + vm_token.data
# self.to_s()
return b_array
|
def function[write_methods, parameter[self]]:
constant[
Write all methods in the current module to a byte string.
:return: A bytestring of all current methods in this module
    :rtype: bytearray
]
variable[b_array] assign[=] call[name[bytearray], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b26adba0>, <ast.Name object at 0x7da1b26ad0c0>]]] in starred[call[name[self].all_vm_tokens.items, parameter[]]] begin[:]
call[name[b_array].append, parameter[name[vm_token].out_op]]
if <ast.BoolOp object at 0x7da1b26ad090> begin[:]
variable[b_array] assign[=] binary_operation[name[b_array] + name[vm_token].data]
return[name[b_array]]
|
keyword[def] identifier[write_methods] ( identifier[self] ):
literal[string]
identifier[b_array] = identifier[bytearray] ()
keyword[for] identifier[key] , identifier[vm_token] keyword[in] identifier[self] . identifier[all_vm_tokens] . identifier[items] ():
identifier[b_array] . identifier[append] ( identifier[vm_token] . identifier[out_op] )
keyword[if] identifier[vm_token] . identifier[data] keyword[is] keyword[not] keyword[None] keyword[and] identifier[vm_token] . identifier[vm_op] != identifier[VMOp] . identifier[NOP] :
identifier[b_array] = identifier[b_array] + identifier[vm_token] . identifier[data]
keyword[return] identifier[b_array]
|
def write_methods(self):
"""
Write all methods in the current module to a byte string.
:return: A bytestring of all current methods in this module
    :rtype: bytearray
"""
b_array = bytearray()
for (key, vm_token) in self.all_vm_tokens.items():
b_array.append(vm_token.out_op)
if vm_token.data is not None and vm_token.vm_op != VMOp.NOP:
b_array = b_array + vm_token.data # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# self.to_s()
return b_array
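
The serialization logic reduced to a standalone loop over toy tokens; VMToken, the opcode values, and the NOP constant below are all invented for illustration.

from collections import namedtuple

VMToken = namedtuple('VMToken', 'out_op data vm_op')
NOP = 0x61  # assumed NOP opcode, purely illustrative
tokens = {0: VMToken(0x52, None, None), 1: VMToken(0x0C, b'\x05hello', 1)}

b_array = bytearray()
for _, tok in tokens.items():
    b_array.append(tok.out_op)
    if tok.data is not None and tok.vm_op != NOP:
        b_array += tok.data
print(bytes(b_array))  # b'R\x0c\x05hello'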
|
def trim_extrema(im, h, mode='maxima'):
r"""
Trims local extrema in greyscale values by a specified amount.
This essentially decapitates peaks and/or floods valleys.
Parameters
----------
im : ND-array
The image whose extrema are to be removed
h : float
The height to remove from each peak or fill in each valley
mode : string {'maxima' | 'minima' | 'extrema'}
Specifies whether to remove maxima or minima or both
Returns
-------
image : ND-array
A copy of the input image with all the peaks and/or valleys removed.
Notes
-----
This function is referred to as **imhmax** or **imhmin** in Matlab.
"""
result = im
if mode in ['maxima', 'extrema']:
result = reconstruction(seed=im - h, mask=im, method='dilation')
    if mode in ['minima', 'extrema']:
        # Plain `if` rather than `elif`, so mode='extrema' also fills valleys.
        result = reconstruction(seed=result + h, mask=result, method='erosion')
return result
|
def function[trim_extrema, parameter[im, h, mode]]:
constant[
Trims local extrema in greyscale values by a specified amount.
This essentially decapitates peaks and/or floods valleys.
Parameters
----------
im : ND-array
The image whose extrema are to be removed
h : float
The height to remove from each peak or fill in each valley
mode : string {'maxima' | 'minima' | 'extrema'}
Specifies whether to remove maxima or minima or both
Returns
-------
image : ND-array
A copy of the input image with all the peaks and/or valleys removed.
Notes
-----
This function is referred to as **imhmax** or **imhmin** in Matlab.
]
variable[result] assign[=] name[im]
if compare[name[mode] in list[[<ast.Constant object at 0x7da1b072cc70>, <ast.Constant object at 0x7da1b072ec20>]]] begin[:]
variable[result] assign[=] call[name[reconstruction], parameter[]]
return[name[result]]
|
keyword[def] identifier[trim_extrema] ( identifier[im] , identifier[h] , identifier[mode] = literal[string] ):
literal[string]
identifier[result] = identifier[im]
keyword[if] identifier[mode] keyword[in] [ literal[string] , literal[string] ]:
identifier[result] = identifier[reconstruction] ( identifier[seed] = identifier[im] - identifier[h] , identifier[mask] = identifier[im] , identifier[method] = literal[string] )
keyword[elif] identifier[mode] keyword[in] [ literal[string] , literal[string] ]:
identifier[result] = identifier[reconstruction] ( identifier[seed] = identifier[im] + identifier[h] , identifier[mask] = identifier[im] , identifier[method] = literal[string] )
keyword[return] identifier[result]
|
def trim_extrema(im, h, mode='maxima'):
"""
Trims local extrema in greyscale values by a specified amount.
This essentially decapitates peaks and/or floods valleys.
Parameters
----------
im : ND-array
The image whose extrema are to be removed
h : float
The height to remove from each peak or fill in each valley
mode : string {'maxima' | 'minima' | 'extrema'}
Specifies whether to remove maxima or minima or both
Returns
-------
image : ND-array
A copy of the input image with all the peaks and/or valleys removed.
Notes
-----
This function is referred to as **imhmax** or **imhmin** in Matlab.
"""
result = im
if mode in ['maxima', 'extrema']:
result = reconstruction(seed=im - h, mask=im, method='dilation') # depends on [control=['if'], data=[]]
    if mode in ['minima', 'extrema']:
        # Plain `if` rather than `elif`, so mode='extrema' also fills valleys.
        result = reconstruction(seed=result + h, mask=result, method='erosion') # depends on [control=['if'], data=[]]
return result
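
A worked h-maxima example using skimage.morphology.reconstruction, which matches the calls above: a lone peak of 5 trimmed with h=2 comes back as 3.

import numpy as np
from skimage.morphology import reconstruction

im = np.zeros((3, 3))
im[1, 1] = 5.0
out = reconstruction(seed=im - 2, mask=im, method='dilation')
print(out[1, 1])  # 3.0 -- the peak is decapitated by h=2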
|
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = create_engine(
neutron_config.database.connection,
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
|
def function[run_migrations_online, parameter[]]:
constant[Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
]
variable[engine] assign[=] call[name[create_engine], parameter[name[neutron_config].database.connection]]
variable[connection] assign[=] call[name[engine].connect, parameter[]]
call[name[context].configure, parameter[]]
<ast.Try object at 0x7da1b00fadd0>
|
keyword[def] identifier[run_migrations_online] ():
literal[string]
identifier[engine] = identifier[create_engine] (
identifier[neutron_config] . identifier[database] . identifier[connection] ,
identifier[poolclass] = identifier[pool] . identifier[NullPool] )
identifier[connection] = identifier[engine] . identifier[connect] ()
identifier[context] . identifier[configure] (
identifier[connection] = identifier[connection] ,
identifier[target_metadata] = identifier[target_metadata] )
keyword[try] :
keyword[with] identifier[context] . identifier[begin_transaction] ():
identifier[context] . identifier[run_migrations] ()
keyword[finally] :
identifier[connection] . identifier[close] ()
|
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = create_engine(neutron_config.database.connection, poolclass=pool.NullPool)
connection = engine.connect()
context.configure(connection=connection, target_metadata=target_metadata)
try:
with context.begin_transaction():
context.run_migrations() # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
finally:
connection.close()
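
In a standard alembic env.py this function is selected at import time; a sketch of the usual dispatch, where run_migrations_offline is assumed to be defined alongside.

from alembic import context  # already in scope in a real env.py

if context.is_offline_mode():
    run_migrations_offline()  # assumed offline counterpart
else:
    run_migrations_online()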
|
def current_day():
"""
Most recent day, if it's during the Advent of Code. Happy Holidays!
Day 1 is assumed, otherwise.
"""
aoc_now = datetime.datetime.now(tz=AOC_TZ)
if aoc_now.month != 12:
log.warning("current_day is only available in December (EST)")
return 1
day = min(aoc_now.day, 25)
return day
|
def function[current_day, parameter[]]:
constant[
Most recent day, if it's during the Advent of Code. Happy Holidays!
Day 1 is assumed, otherwise.
]
variable[aoc_now] assign[=] call[name[datetime].datetime.now, parameter[]]
if compare[name[aoc_now].month not_equal[!=] constant[12]] begin[:]
call[name[log].warning, parameter[constant[current_day is only available in December (EST)]]]
return[constant[1]]
variable[day] assign[=] call[name[min], parameter[name[aoc_now].day, constant[25]]]
return[name[day]]
|
keyword[def] identifier[current_day] ():
literal[string]
identifier[aoc_now] = identifier[datetime] . identifier[datetime] . identifier[now] ( identifier[tz] = identifier[AOC_TZ] )
keyword[if] identifier[aoc_now] . identifier[month] != literal[int] :
identifier[log] . identifier[warning] ( literal[string] )
keyword[return] literal[int]
identifier[day] = identifier[min] ( identifier[aoc_now] . identifier[day] , literal[int] )
keyword[return] identifier[day]
|
def current_day():
"""
Most recent day, if it's during the Advent of Code. Happy Holidays!
Day 1 is assumed, otherwise.
"""
aoc_now = datetime.datetime.now(tz=AOC_TZ)
if aoc_now.month != 12:
log.warning('current_day is only available in December (EST)')
return 1 # depends on [control=['if'], data=[]]
day = min(aoc_now.day, 25)
return day
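
AOC_TZ is module-level state the function depends on; a sketch of how it is likely defined (aocd pins it to US/Eastern via dateutil).

from dateutil import tz

AOC_TZ = tz.gettz('America/New_York')  # assumed definition of the global above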
|
def decode_once(estimator,
problem_name,
hparams,
infer_input_fn,
decode_hp,
decode_to_file,
output_dir,
log_results=True,
checkpoint_path=None):
"""Decodes once.
Args:
estimator: tf.estimator.Estimator instance. Used to generate encoded
predictions.
problem_name: str. Name of problem.
hparams: HParams instance. HParams for model training.
infer_input_fn: zero-arg function. Input function for estimator.
decode_hp: HParams instance. See decode_hparams() above.
    decode_to_file: str. Prefix for filenames. Used to generate filenames to
which decoded predictions are written.
output_dir: str. Output directory. Only used for writing images.
log_results: bool. If False, return encoded predictions without any
further processing.
checkpoint_path: str. Path to load model checkpoint from. If unspecified,
Estimator's default is used.
Returns:
If decode_hp.decode_in_memory is True:
List of dicts, one per example. Values are either numpy arrays or decoded
strings.
If decode_hp.decode_in_memory is False:
An empty list.
"""
# Get the predictions as an iterable
predictions = estimator.predict(infer_input_fn,
checkpoint_path=checkpoint_path)
if not log_results:
return list(predictions)
# Prepare output file writers if decode_to_file passed
decode_to_file = decode_to_file or decode_hp.decode_to_file
if decode_to_file:
output_filepath = _decode_filename(decode_to_file, problem_name, decode_hp)
parts = output_filepath.split(".")
parts[-1] = "targets"
target_filepath = ".".join(parts)
parts[-1] = "inputs"
input_filepath = ".".join(parts)
output_file = tf.gfile.Open(output_filepath, "w")
target_file = tf.gfile.Open(target_filepath, "w")
input_file = tf.gfile.Open(input_filepath, "w")
problem_hparams = hparams.problem_hparams
# Inputs vocabulary is set to targets if there are no inputs in the problem,
# e.g., for language models where the inputs are just a prefix of targets.
has_input = "inputs" in problem_hparams.vocabulary
inputs_vocab_key = "inputs" if has_input else "targets"
inputs_vocab = problem_hparams.vocabulary[inputs_vocab_key]
targets_vocab = problem_hparams.vocabulary["targets"]
num_eval_samples = 0
# all_outputs[i][j] = (input: str, output: str, target: str). Input,
# decoded output, and target strings for example i, beam rank j.
all_outputs = []
for num_predictions, prediction in enumerate(predictions):
num_eval_samples += 1
num_predictions += 1
inputs = prediction.get("inputs")
targets = prediction.get("targets")
outputs = prediction.get("outputs")
# Log predictions
decoded_outputs = [] # [(str, str, str)]. See all_outputs above.
if decode_hp.decode_in_memory:
all_outputs.append(decoded_outputs)
decoded_scores = []
if decode_hp.return_beams:
output_beams = np.split(outputs, decode_hp.beam_size, axis=0)
scores = None
if "scores" in prediction:
scores = np.split(prediction["scores"], decode_hp.beam_size, axis=0)
for i, beam in enumerate(output_beams):
tf.logging.info("BEAM %d:" % i)
score = scores and scores[i]
decoded = log_decode_results(
inputs,
beam,
problem_name,
num_predictions,
inputs_vocab,
targets_vocab,
save_images=decode_hp.save_images,
output_dir=output_dir,
identity_output=decode_hp.identity_output,
targets=targets,
log_results=log_results)
decoded_outputs.append(decoded)
if decode_hp.write_beam_scores:
decoded_scores.append(score)
else:
decoded = log_decode_results(
inputs,
outputs,
problem_name,
num_predictions,
inputs_vocab,
targets_vocab,
save_images=decode_hp.save_images,
output_dir=output_dir,
identity_output=decode_hp.identity_output,
targets=targets,
log_results=log_results,
skip_eos_postprocess=decode_hp.skip_eos_postprocess)
decoded_outputs.append(decoded)
# Write out predictions if decode_to_file passed
if decode_to_file:
for i, (d_input, d_output, d_target) in enumerate(decoded_outputs):
# Skip if all padding
if d_input and re.match("^({})+$".format(text_encoder.PAD), d_input):
continue
beam_score_str = ""
if decode_hp.write_beam_scores:
beam_score_str = "\t%.2f" % decoded_scores[i]
output_file.write(str(d_output) + beam_score_str + decode_hp.delimiter)
target_file.write(str(d_target) + decode_hp.delimiter)
input_file.write(str(d_input) + decode_hp.delimiter)
if (decode_hp.num_samples >= 0 and
num_predictions >= decode_hp.num_samples):
break
mlperf_log.transformer_print(key=mlperf_log.EVAL_SIZE,
value=num_eval_samples,
hparams=hparams)
if decode_to_file:
output_file.close()
target_file.close()
input_file.close()
return all_outputs
|
def function[decode_once, parameter[estimator, problem_name, hparams, infer_input_fn, decode_hp, decode_to_file, output_dir, log_results, checkpoint_path]]:
constant[Decodes once.
Args:
estimator: tf.estimator.Estimator instance. Used to generate encoded
predictions.
problem_name: str. Name of problem.
hparams: HParams instance. HParams for model training.
infer_input_fn: zero-arg function. Input function for estimator.
decode_hp: HParams instance. See decode_hparams() above.
    decode_to_file: str. Prefix for filenames. Used to generate filenames to
which decoded predictions are written.
output_dir: str. Output directory. Only used for writing images.
log_results: bool. If False, return encoded predictions without any
further processing.
checkpoint_path: str. Path to load model checkpoint from. If unspecified,
Estimator's default is used.
Returns:
If decode_hp.decode_in_memory is True:
List of dicts, one per example. Values are either numpy arrays or decoded
strings.
If decode_hp.decode_in_memory is False:
An empty list.
]
variable[predictions] assign[=] call[name[estimator].predict, parameter[name[infer_input_fn]]]
if <ast.UnaryOp object at 0x7da1b2347eb0> begin[:]
return[call[name[list], parameter[name[predictions]]]]
variable[decode_to_file] assign[=] <ast.BoolOp object at 0x7da1b2345510>
if name[decode_to_file] begin[:]
variable[output_filepath] assign[=] call[name[_decode_filename], parameter[name[decode_to_file], name[problem_name], name[decode_hp]]]
variable[parts] assign[=] call[name[output_filepath].split, parameter[constant[.]]]
call[name[parts]][<ast.UnaryOp object at 0x7da1b2345b70>] assign[=] constant[targets]
variable[target_filepath] assign[=] call[constant[.].join, parameter[name[parts]]]
call[name[parts]][<ast.UnaryOp object at 0x7da1b2345600>] assign[=] constant[inputs]
variable[input_filepath] assign[=] call[constant[.].join, parameter[name[parts]]]
variable[output_file] assign[=] call[name[tf].gfile.Open, parameter[name[output_filepath], constant[w]]]
variable[target_file] assign[=] call[name[tf].gfile.Open, parameter[name[target_filepath], constant[w]]]
variable[input_file] assign[=] call[name[tf].gfile.Open, parameter[name[input_filepath], constant[w]]]
variable[problem_hparams] assign[=] name[hparams].problem_hparams
variable[has_input] assign[=] compare[constant[inputs] in name[problem_hparams].vocabulary]
variable[inputs_vocab_key] assign[=] <ast.IfExp object at 0x7da1b2347580>
variable[inputs_vocab] assign[=] call[name[problem_hparams].vocabulary][name[inputs_vocab_key]]
variable[targets_vocab] assign[=] call[name[problem_hparams].vocabulary][constant[targets]]
variable[num_eval_samples] assign[=] constant[0]
variable[all_outputs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b2345750>, <ast.Name object at 0x7da1b2344dc0>]]] in starred[call[name[enumerate], parameter[name[predictions]]]] begin[:]
<ast.AugAssign object at 0x7da1b2347520>
<ast.AugAssign object at 0x7da1b23474f0>
variable[inputs] assign[=] call[name[prediction].get, parameter[constant[inputs]]]
variable[targets] assign[=] call[name[prediction].get, parameter[constant[targets]]]
variable[outputs] assign[=] call[name[prediction].get, parameter[constant[outputs]]]
variable[decoded_outputs] assign[=] list[[]]
if name[decode_hp].decode_in_memory begin[:]
call[name[all_outputs].append, parameter[name[decoded_outputs]]]
variable[decoded_scores] assign[=] list[[]]
if name[decode_hp].return_beams begin[:]
variable[output_beams] assign[=] call[name[np].split, parameter[name[outputs], name[decode_hp].beam_size]]
variable[scores] assign[=] constant[None]
if compare[constant[scores] in name[prediction]] begin[:]
variable[scores] assign[=] call[name[np].split, parameter[call[name[prediction]][constant[scores]], name[decode_hp].beam_size]]
for taget[tuple[[<ast.Name object at 0x7da1b201d990>, <ast.Name object at 0x7da1b201c9d0>]]] in starred[call[name[enumerate], parameter[name[output_beams]]]] begin[:]
call[name[tf].logging.info, parameter[binary_operation[constant[BEAM %d:] <ast.Mod object at 0x7da2590d6920> name[i]]]]
variable[score] assign[=] <ast.BoolOp object at 0x7da1b201cbe0>
variable[decoded] assign[=] call[name[log_decode_results], parameter[name[inputs], name[beam], name[problem_name], name[num_predictions], name[inputs_vocab], name[targets_vocab]]]
call[name[decoded_outputs].append, parameter[name[decoded]]]
if name[decode_hp].write_beam_scores begin[:]
call[name[decoded_scores].append, parameter[name[score]]]
if name[decode_to_file] begin[:]
for taget[tuple[[<ast.Name object at 0x7da204565ea0>, <ast.Tuple object at 0x7da204564d30>]]] in starred[call[name[enumerate], parameter[name[decoded_outputs]]]] begin[:]
if <ast.BoolOp object at 0x7da204565270> begin[:]
continue
variable[beam_score_str] assign[=] constant[]
if name[decode_hp].write_beam_scores begin[:]
variable[beam_score_str] assign[=] binary_operation[constant[ %.2f] <ast.Mod object at 0x7da2590d6920> call[name[decoded_scores]][name[i]]]
call[name[output_file].write, parameter[binary_operation[binary_operation[call[name[str], parameter[name[d_output]]] + name[beam_score_str]] + name[decode_hp].delimiter]]]
call[name[target_file].write, parameter[binary_operation[call[name[str], parameter[name[d_target]]] + name[decode_hp].delimiter]]]
call[name[input_file].write, parameter[binary_operation[call[name[str], parameter[name[d_input]]] + name[decode_hp].delimiter]]]
if <ast.BoolOp object at 0x7da18fe933a0> begin[:]
break
call[name[mlperf_log].transformer_print, parameter[]]
if name[decode_to_file] begin[:]
call[name[output_file].close, parameter[]]
call[name[target_file].close, parameter[]]
call[name[input_file].close, parameter[]]
return[name[all_outputs]]
|
keyword[def] identifier[decode_once] ( identifier[estimator] ,
identifier[problem_name] ,
identifier[hparams] ,
identifier[infer_input_fn] ,
identifier[decode_hp] ,
identifier[decode_to_file] ,
identifier[output_dir] ,
identifier[log_results] = keyword[True] ,
identifier[checkpoint_path] = keyword[None] ):
literal[string]
identifier[predictions] = identifier[estimator] . identifier[predict] ( identifier[infer_input_fn] ,
identifier[checkpoint_path] = identifier[checkpoint_path] )
keyword[if] keyword[not] identifier[log_results] :
keyword[return] identifier[list] ( identifier[predictions] )
identifier[decode_to_file] = identifier[decode_to_file] keyword[or] identifier[decode_hp] . identifier[decode_to_file]
keyword[if] identifier[decode_to_file] :
identifier[output_filepath] = identifier[_decode_filename] ( identifier[decode_to_file] , identifier[problem_name] , identifier[decode_hp] )
identifier[parts] = identifier[output_filepath] . identifier[split] ( literal[string] )
identifier[parts] [- literal[int] ]= literal[string]
identifier[target_filepath] = literal[string] . identifier[join] ( identifier[parts] )
identifier[parts] [- literal[int] ]= literal[string]
identifier[input_filepath] = literal[string] . identifier[join] ( identifier[parts] )
identifier[output_file] = identifier[tf] . identifier[gfile] . identifier[Open] ( identifier[output_filepath] , literal[string] )
identifier[target_file] = identifier[tf] . identifier[gfile] . identifier[Open] ( identifier[target_filepath] , literal[string] )
identifier[input_file] = identifier[tf] . identifier[gfile] . identifier[Open] ( identifier[input_filepath] , literal[string] )
identifier[problem_hparams] = identifier[hparams] . identifier[problem_hparams]
identifier[has_input] = literal[string] keyword[in] identifier[problem_hparams] . identifier[vocabulary]
identifier[inputs_vocab_key] = literal[string] keyword[if] identifier[has_input] keyword[else] literal[string]
identifier[inputs_vocab] = identifier[problem_hparams] . identifier[vocabulary] [ identifier[inputs_vocab_key] ]
identifier[targets_vocab] = identifier[problem_hparams] . identifier[vocabulary] [ literal[string] ]
identifier[num_eval_samples] = literal[int]
identifier[all_outputs] =[]
keyword[for] identifier[num_predictions] , identifier[prediction] keyword[in] identifier[enumerate] ( identifier[predictions] ):
identifier[num_eval_samples] += literal[int]
identifier[num_predictions] += literal[int]
identifier[inputs] = identifier[prediction] . identifier[get] ( literal[string] )
identifier[targets] = identifier[prediction] . identifier[get] ( literal[string] )
identifier[outputs] = identifier[prediction] . identifier[get] ( literal[string] )
identifier[decoded_outputs] =[]
keyword[if] identifier[decode_hp] . identifier[decode_in_memory] :
identifier[all_outputs] . identifier[append] ( identifier[decoded_outputs] )
identifier[decoded_scores] =[]
keyword[if] identifier[decode_hp] . identifier[return_beams] :
identifier[output_beams] = identifier[np] . identifier[split] ( identifier[outputs] , identifier[decode_hp] . identifier[beam_size] , identifier[axis] = literal[int] )
identifier[scores] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[prediction] :
identifier[scores] = identifier[np] . identifier[split] ( identifier[prediction] [ literal[string] ], identifier[decode_hp] . identifier[beam_size] , identifier[axis] = literal[int] )
keyword[for] identifier[i] , identifier[beam] keyword[in] identifier[enumerate] ( identifier[output_beams] ):
identifier[tf] . identifier[logging] . identifier[info] ( literal[string] % identifier[i] )
identifier[score] = identifier[scores] keyword[and] identifier[scores] [ identifier[i] ]
identifier[decoded] = identifier[log_decode_results] (
identifier[inputs] ,
identifier[beam] ,
identifier[problem_name] ,
identifier[num_predictions] ,
identifier[inputs_vocab] ,
identifier[targets_vocab] ,
identifier[save_images] = identifier[decode_hp] . identifier[save_images] ,
identifier[output_dir] = identifier[output_dir] ,
identifier[identity_output] = identifier[decode_hp] . identifier[identity_output] ,
identifier[targets] = identifier[targets] ,
identifier[log_results] = identifier[log_results] )
identifier[decoded_outputs] . identifier[append] ( identifier[decoded] )
keyword[if] identifier[decode_hp] . identifier[write_beam_scores] :
identifier[decoded_scores] . identifier[append] ( identifier[score] )
keyword[else] :
identifier[decoded] = identifier[log_decode_results] (
identifier[inputs] ,
identifier[outputs] ,
identifier[problem_name] ,
identifier[num_predictions] ,
identifier[inputs_vocab] ,
identifier[targets_vocab] ,
identifier[save_images] = identifier[decode_hp] . identifier[save_images] ,
identifier[output_dir] = identifier[output_dir] ,
identifier[identity_output] = identifier[decode_hp] . identifier[identity_output] ,
identifier[targets] = identifier[targets] ,
identifier[log_results] = identifier[log_results] ,
identifier[skip_eos_postprocess] = identifier[decode_hp] . identifier[skip_eos_postprocess] )
identifier[decoded_outputs] . identifier[append] ( identifier[decoded] )
keyword[if] identifier[decode_to_file] :
keyword[for] identifier[i] ,( identifier[d_input] , identifier[d_output] , identifier[d_target] ) keyword[in] identifier[enumerate] ( identifier[decoded_outputs] ):
keyword[if] identifier[d_input] keyword[and] identifier[re] . identifier[match] ( literal[string] . identifier[format] ( identifier[text_encoder] . identifier[PAD] ), identifier[d_input] ):
keyword[continue]
identifier[beam_score_str] = literal[string]
keyword[if] identifier[decode_hp] . identifier[write_beam_scores] :
identifier[beam_score_str] = literal[string] % identifier[decoded_scores] [ identifier[i] ]
identifier[output_file] . identifier[write] ( identifier[str] ( identifier[d_output] )+ identifier[beam_score_str] + identifier[decode_hp] . identifier[delimiter] )
identifier[target_file] . identifier[write] ( identifier[str] ( identifier[d_target] )+ identifier[decode_hp] . identifier[delimiter] )
identifier[input_file] . identifier[write] ( identifier[str] ( identifier[d_input] )+ identifier[decode_hp] . identifier[delimiter] )
keyword[if] ( identifier[decode_hp] . identifier[num_samples] >= literal[int] keyword[and]
identifier[num_predictions] >= identifier[decode_hp] . identifier[num_samples] ):
keyword[break]
identifier[mlperf_log] . identifier[transformer_print] ( identifier[key] = identifier[mlperf_log] . identifier[EVAL_SIZE] ,
identifier[value] = identifier[num_eval_samples] ,
identifier[hparams] = identifier[hparams] )
keyword[if] identifier[decode_to_file] :
identifier[output_file] . identifier[close] ()
identifier[target_file] . identifier[close] ()
identifier[input_file] . identifier[close] ()
keyword[return] identifier[all_outputs]
|
def decode_once(estimator, problem_name, hparams, infer_input_fn, decode_hp, decode_to_file, output_dir, log_results=True, checkpoint_path=None):
"""Decodes once.
Args:
estimator: tf.estimator.Estimator instance. Used to generate encoded
predictions.
problem_name: str. Name of problem.
hparams: HParams instance. HParams for model training.
infer_input_fn: zero-arg function. Input function for estimator.
decode_hp: HParams instance. See decode_hparams() above.
    decode_to_file: str. Prefix for filenames. Used to generate filenames to
which decoded predictions are written.
output_dir: str. Output directory. Only used for writing images.
log_results: bool. If False, return encoded predictions without any
further processing.
checkpoint_path: str. Path to load model checkpoint from. If unspecified,
Estimator's default is used.
Returns:
If decode_hp.decode_in_memory is True:
List of dicts, one per example. Values are either numpy arrays or decoded
strings.
If decode_hp.decode_in_memory is False:
An empty list.
"""
# Get the predictions as an iterable
predictions = estimator.predict(infer_input_fn, checkpoint_path=checkpoint_path)
if not log_results:
return list(predictions) # depends on [control=['if'], data=[]]
# Prepare output file writers if decode_to_file passed
decode_to_file = decode_to_file or decode_hp.decode_to_file
if decode_to_file:
output_filepath = _decode_filename(decode_to_file, problem_name, decode_hp)
parts = output_filepath.split('.')
parts[-1] = 'targets'
target_filepath = '.'.join(parts)
parts[-1] = 'inputs'
input_filepath = '.'.join(parts)
output_file = tf.gfile.Open(output_filepath, 'w')
target_file = tf.gfile.Open(target_filepath, 'w')
input_file = tf.gfile.Open(input_filepath, 'w') # depends on [control=['if'], data=[]]
problem_hparams = hparams.problem_hparams
# Inputs vocabulary is set to targets if there are no inputs in the problem,
# e.g., for language models where the inputs are just a prefix of targets.
has_input = 'inputs' in problem_hparams.vocabulary
inputs_vocab_key = 'inputs' if has_input else 'targets'
inputs_vocab = problem_hparams.vocabulary[inputs_vocab_key]
targets_vocab = problem_hparams.vocabulary['targets']
num_eval_samples = 0
# all_outputs[i][j] = (input: str, output: str, target: str). Input,
# decoded output, and target strings for example i, beam rank j.
all_outputs = []
for (num_predictions, prediction) in enumerate(predictions):
num_eval_samples += 1
num_predictions += 1
inputs = prediction.get('inputs')
targets = prediction.get('targets')
outputs = prediction.get('outputs')
# Log predictions
decoded_outputs = [] # [(str, str, str)]. See all_outputs above.
if decode_hp.decode_in_memory:
all_outputs.append(decoded_outputs) # depends on [control=['if'], data=[]]
decoded_scores = []
if decode_hp.return_beams:
output_beams = np.split(outputs, decode_hp.beam_size, axis=0)
scores = None
if 'scores' in prediction:
scores = np.split(prediction['scores'], decode_hp.beam_size, axis=0) # depends on [control=['if'], data=['prediction']]
for (i, beam) in enumerate(output_beams):
tf.logging.info('BEAM %d:' % i)
score = scores and scores[i]
decoded = log_decode_results(inputs, beam, problem_name, num_predictions, inputs_vocab, targets_vocab, save_images=decode_hp.save_images, output_dir=output_dir, identity_output=decode_hp.identity_output, targets=targets, log_results=log_results)
decoded_outputs.append(decoded)
if decode_hp.write_beam_scores:
decoded_scores.append(score) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
decoded = log_decode_results(inputs, outputs, problem_name, num_predictions, inputs_vocab, targets_vocab, save_images=decode_hp.save_images, output_dir=output_dir, identity_output=decode_hp.identity_output, targets=targets, log_results=log_results, skip_eos_postprocess=decode_hp.skip_eos_postprocess)
decoded_outputs.append(decoded)
# Write out predictions if decode_to_file passed
if decode_to_file:
for (i, (d_input, d_output, d_target)) in enumerate(decoded_outputs):
# Skip if all padding
if d_input and re.match('^({})+$'.format(text_encoder.PAD), d_input):
continue # depends on [control=['if'], data=[]]
beam_score_str = ''
if decode_hp.write_beam_scores:
beam_score_str = '\t%.2f' % decoded_scores[i] # depends on [control=['if'], data=[]]
output_file.write(str(d_output) + beam_score_str + decode_hp.delimiter)
target_file.write(str(d_target) + decode_hp.delimiter)
input_file.write(str(d_input) + decode_hp.delimiter) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if decode_hp.num_samples >= 0 and num_predictions >= decode_hp.num_samples:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
mlperf_log.transformer_print(key=mlperf_log.EVAL_SIZE, value=num_eval_samples, hparams=hparams)
if decode_to_file:
output_file.close()
target_file.close()
input_file.close() # depends on [control=['if'], data=[]]
return all_outputs
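
The beam handling hinges on np.split along axis 0 of a stacked [beam_size, ...] array; a quick standalone check of that one step, with an arbitrary beam size.

import numpy as np

outputs = np.arange(6).reshape(3, 2)   # pretend beam_size == 3
beams = np.split(outputs, 3, axis=0)
print([b.tolist() for b in beams])     # [[[0, 1]], [[2, 3]], [[4, 5]]]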
|
def get_datacenters(service_instance, datacenter_names=None,
get_all_datacenters=False):
'''
Returns all datacenters in a vCenter.
service_instance
        The Service Instance Object from which to obtain the datacenters.
datacenter_names
List of datacenter names to filter by. Default value is None.
get_all_datacenters
Flag specifying whether to retrieve all datacenters.
        Default value is False.
'''
items = [i['object'] for i in
get_mors_with_properties(service_instance,
vim.Datacenter,
property_list=['name'])
if get_all_datacenters or
(datacenter_names and i['name'] in datacenter_names)]
return items
|
def function[get_datacenters, parameter[service_instance, datacenter_names, get_all_datacenters]]:
constant[
Returns all datacenters in a vCenter.
service_instance
    The Service Instance Object from which to obtain the datacenters.
datacenter_names
List of datacenter names to filter by. Default value is None.
get_all_datacenters
Flag specifying whether to retrieve all datacenters.
    Default value is False.
]
variable[items] assign[=] <ast.ListComp object at 0x7da1b1c14820>
return[name[items]]
|
keyword[def] identifier[get_datacenters] ( identifier[service_instance] , identifier[datacenter_names] = keyword[None] ,
identifier[get_all_datacenters] = keyword[False] ):
literal[string]
identifier[items] =[ identifier[i] [ literal[string] ] keyword[for] identifier[i] keyword[in]
identifier[get_mors_with_properties] ( identifier[service_instance] ,
identifier[vim] . identifier[Datacenter] ,
identifier[property_list] =[ literal[string] ])
keyword[if] identifier[get_all_datacenters] keyword[or]
( identifier[datacenter_names] keyword[and] identifier[i] [ literal[string] ] keyword[in] identifier[datacenter_names] )]
keyword[return] identifier[items]
|
def get_datacenters(service_instance, datacenter_names=None, get_all_datacenters=False):
"""
Returns all datacenters in a vCenter.
service_instance
        The Service Instance Object from which to obtain the datacenters.
datacenter_names
List of datacenter names to filter by. Default value is None.
get_all_datacenters
Flag specifying whether to retrieve all datacenters.
        Default value is False.
"""
items = [i['object'] for i in get_mors_with_properties(service_instance, vim.Datacenter, property_list=['name']) if get_all_datacenters or (datacenter_names and i['name'] in datacenter_names)]
return items
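
The filter predicate from the comprehension, isolated with made-up managed-object records: everything passes when get_all_datacenters is true, otherwise only names listed in datacenter_names survive.

mors = [{'name': 'dc1', 'object': 'DC1'}, {'name': 'dc2', 'object': 'DC2'}]
get_all_datacenters, datacenter_names = False, ['dc2']
items = [m['object'] for m in mors
         if get_all_datacenters or
         (datacenter_names and m['name'] in datacenter_names)]
print(items)  # ['DC2']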
|
def export_event_based_gateway_info(node_params, output_element):
"""
Adds EventBasedGateway node attributes to exported XML element
:param node_params: dictionary with given event based gateway parameters,
:param output_element: object representing BPMN XML 'eventBasedGateway' element.
"""
output_element.set(consts.Consts.gateway_direction, node_params[consts.Consts.gateway_direction])
output_element.set(consts.Consts.instantiate, node_params[consts.Consts.instantiate])
output_element.set(consts.Consts.event_gateway_type, node_params[consts.Consts.event_gateway_type])
|
def function[export_event_based_gateway_info, parameter[node_params, output_element]]:
constant[
Adds EventBasedGateway node attributes to exported XML element
:param node_params: dictionary with given event based gateway parameters,
:param output_element: object representing BPMN XML 'eventBasedGateway' element.
]
call[name[output_element].set, parameter[name[consts].Consts.gateway_direction, call[name[node_params]][name[consts].Consts.gateway_direction]]]
call[name[output_element].set, parameter[name[consts].Consts.instantiate, call[name[node_params]][name[consts].Consts.instantiate]]]
call[name[output_element].set, parameter[name[consts].Consts.event_gateway_type, call[name[node_params]][name[consts].Consts.event_gateway_type]]]
|
keyword[def] identifier[export_event_based_gateway_info] ( identifier[node_params] , identifier[output_element] ):
literal[string]
identifier[output_element] . identifier[set] ( identifier[consts] . identifier[Consts] . identifier[gateway_direction] , identifier[node_params] [ identifier[consts] . identifier[Consts] . identifier[gateway_direction] ])
identifier[output_element] . identifier[set] ( identifier[consts] . identifier[Consts] . identifier[instantiate] , identifier[node_params] [ identifier[consts] . identifier[Consts] . identifier[instantiate] ])
identifier[output_element] . identifier[set] ( identifier[consts] . identifier[Consts] . identifier[event_gateway_type] , identifier[node_params] [ identifier[consts] . identifier[Consts] . identifier[event_gateway_type] ])
|
def export_event_based_gateway_info(node_params, output_element):
"""
Adds EventBasedGateway node attributes to exported XML element
:param node_params: dictionary with given event based gateway parameters,
:param output_element: object representing BPMN XML 'eventBasedGateway' element.
"""
output_element.set(consts.Consts.gateway_direction, node_params[consts.Consts.gateway_direction])
output_element.set(consts.Consts.instantiate, node_params[consts.Consts.instantiate])
output_element.set(consts.Consts.event_gateway_type, node_params[consts.Consts.event_gateway_type])
|
def choose_boundary():
"""Generate a multipart boundry.
:returns: A boundary string
"""
global BOUNDARY_PREFIX
if BOUNDARY_PREFIX is None:
BOUNDARY_PREFIX = "urlfetch"
try:
uid = repr(os.getuid())
BOUNDARY_PREFIX += "." + uid
except AttributeError:
pass
try:
pid = repr(os.getpid())
BOUNDARY_PREFIX += "." + pid
except AttributeError:
pass
return "%s.%s" % (BOUNDARY_PREFIX, uuid.uuid4().hex)
|
def function[choose_boundary, parameter[]]:
    constant[Generate a multipart boundary.
:returns: A boundary string
]
<ast.Global object at 0x7da1b25e7b80>
if compare[name[BOUNDARY_PREFIX] is constant[None]] begin[:]
variable[BOUNDARY_PREFIX] assign[=] constant[urlfetch]
<ast.Try object at 0x7da1b25e7700>
<ast.Try object at 0x7da1b24373d0>
return[binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b24377c0>, <ast.Attribute object at 0x7da1b24348b0>]]]]
|
keyword[def] identifier[choose_boundary] ():
literal[string]
keyword[global] identifier[BOUNDARY_PREFIX]
keyword[if] identifier[BOUNDARY_PREFIX] keyword[is] keyword[None] :
identifier[BOUNDARY_PREFIX] = literal[string]
keyword[try] :
identifier[uid] = identifier[repr] ( identifier[os] . identifier[getuid] ())
identifier[BOUNDARY_PREFIX] += literal[string] + identifier[uid]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[try] :
identifier[pid] = identifier[repr] ( identifier[os] . identifier[getpid] ())
identifier[BOUNDARY_PREFIX] += literal[string] + identifier[pid]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[return] literal[string] %( identifier[BOUNDARY_PREFIX] , identifier[uuid] . identifier[uuid4] (). identifier[hex] )
|
def choose_boundary():
"""Generate a multipart boundry.
:returns: A boundary string
"""
global BOUNDARY_PREFIX
if BOUNDARY_PREFIX is None:
BOUNDARY_PREFIX = 'urlfetch'
try:
uid = repr(os.getuid())
BOUNDARY_PREFIX += '.' + uid # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
try:
pid = repr(os.getpid())
BOUNDARY_PREFIX += '.' + pid # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['BOUNDARY_PREFIX']]
return '%s.%s' % (BOUNDARY_PREFIX, uuid.uuid4().hex)
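
Example invocation, assuming the module also defines BOUNDARY_PREFIX = None and imports os and uuid; the uid, pid and hex suffix vary per process.

import os
import uuid

BOUNDARY_PREFIX = None  # assumed module-level default the function relies on
print(choose_boundary())  # e.g. urlfetch.1000.4242.9fd1... (values vary)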
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.