| code (stringlengths 75–104k) | code_sememe (stringlengths 47–309k) | token_type (stringlengths 215–214k) | code_dependency (stringlengths 75–155k) |
|---|---|---|---|
def output(self, _filename):
"""
_filename is not used
Args:
_filename(string)
"""
txt = ""
for c in self.contracts:
(name, _inheritance, _var, func_summaries, _modif_summaries) = c.get_summary()
txt += blue("\n+ Contract %s\n"%name)
# (c_name, f_name, visi, _, _, _, _, _) in func_summaries
public = [(elem[0], (elem[1], elem[2]) ) for elem in func_summaries]
collect = collections.defaultdict(list)
for a,b in public:
collect[a].append(b)
public = list(collect.items())
for contract, functions in public:
txt += blue(" - From {}\n".format(contract))
functions = sorted(functions)
for (function, visi) in functions:
if visi in ['external', 'public']:
txt += green(" - {} ({})\n".format(function, visi))
for (function, visi) in functions:
if visi in ['internal', 'private']:
txt += magenta(" - {} ({})\n".format(function, visi))
for (function, visi) in functions:
if visi not in ['external', 'public', 'internal', 'private']:
txt += " - {} ({})\n".format(function, visi)
self.info(txt)
|
def function[output, parameter[self, _filename]]:
constant[
_filename is not used
Args:
_filename(string)
]
variable[txt] assign[=] constant[]
for taget[name[c]] in starred[name[self].contracts] begin[:]
<ast.Tuple object at 0x7da20c76d330> assign[=] call[name[c].get_summary, parameter[]]
<ast.AugAssign object at 0x7da20c76c520>
variable[public] assign[=] <ast.ListComp object at 0x7da20c76f8b0>
variable[collect] assign[=] call[name[collections].defaultdict, parameter[name[list]]]
for taget[tuple[[<ast.Name object at 0x7da1b1722200>, <ast.Name object at 0x7da1b1720df0>]]] in starred[name[public]] begin[:]
call[call[name[collect]][name[a]].append, parameter[name[b]]]
variable[public] assign[=] call[name[list], parameter[call[name[collect].items, parameter[]]]]
for taget[tuple[[<ast.Name object at 0x7da1b1721300>, <ast.Name object at 0x7da1b1723400>]]] in starred[name[public]] begin[:]
<ast.AugAssign object at 0x7da1b1723d00>
variable[functions] assign[=] call[name[sorted], parameter[name[functions]]]
for taget[tuple[[<ast.Name object at 0x7da204623bb0>, <ast.Name object at 0x7da20e956200>]]] in starred[name[functions]] begin[:]
if compare[name[visi] in list[[<ast.Constant object at 0x7da20e954100>, <ast.Constant object at 0x7da20e9544f0>]]] begin[:]
<ast.AugAssign object at 0x7da20e957b80>
for taget[tuple[[<ast.Name object at 0x7da20e955270>, <ast.Name object at 0x7da20e957130>]]] in starred[name[functions]] begin[:]
if compare[name[visi] in list[[<ast.Constant object at 0x7da20e962710>, <ast.Constant object at 0x7da20e960c70>]]] begin[:]
<ast.AugAssign object at 0x7da20e963af0>
for taget[tuple[[<ast.Name object at 0x7da1b16ab100>, <ast.Name object at 0x7da1b16a83a0>]]] in starred[name[functions]] begin[:]
if compare[name[visi] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b16a8c10>, <ast.Constant object at 0x7da1b16a8a60>, <ast.Constant object at 0x7da1b16a95d0>, <ast.Constant object at 0x7da1b16a89a0>]]] begin[:]
<ast.AugAssign object at 0x7da1b16a86d0>
call[name[self].info, parameter[name[txt]]]
|
keyword[def] identifier[output] ( identifier[self] , identifier[_filename] ):
literal[string]
identifier[txt] = literal[string]
keyword[for] identifier[c] keyword[in] identifier[self] . identifier[contracts] :
( identifier[name] , identifier[_inheritance] , identifier[_var] , identifier[func_summaries] , identifier[_modif_summaries] )= identifier[c] . identifier[get_summary] ()
identifier[txt] += identifier[blue] ( literal[string] % identifier[name] )
identifier[public] =[( identifier[elem] [ literal[int] ],( identifier[elem] [ literal[int] ], identifier[elem] [ literal[int] ])) keyword[for] identifier[elem] keyword[in] identifier[func_summaries] ]
identifier[collect] = identifier[collections] . identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[a] , identifier[b] keyword[in] identifier[public] :
identifier[collect] [ identifier[a] ]. identifier[append] ( identifier[b] )
identifier[public] = identifier[list] ( identifier[collect] . identifier[items] ())
keyword[for] identifier[contract] , identifier[functions] keyword[in] identifier[public] :
identifier[txt] += identifier[blue] ( literal[string] . identifier[format] ( identifier[contract] ))
identifier[functions] = identifier[sorted] ( identifier[functions] )
keyword[for] ( identifier[function] , identifier[visi] ) keyword[in] identifier[functions] :
keyword[if] identifier[visi] keyword[in] [ literal[string] , literal[string] ]:
identifier[txt] += identifier[green] ( literal[string] . identifier[format] ( identifier[function] , identifier[visi] ))
keyword[for] ( identifier[function] , identifier[visi] ) keyword[in] identifier[functions] :
keyword[if] identifier[visi] keyword[in] [ literal[string] , literal[string] ]:
identifier[txt] += identifier[magenta] ( literal[string] . identifier[format] ( identifier[function] , identifier[visi] ))
keyword[for] ( identifier[function] , identifier[visi] ) keyword[in] identifier[functions] :
keyword[if] identifier[visi] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[txt] += literal[string] . identifier[format] ( identifier[function] , identifier[visi] )
identifier[self] . identifier[info] ( identifier[txt] )
|
def output(self, _filename):
"""
_filename is not used
Args:
_filename(string)
"""
txt = ''
for c in self.contracts:
(name, _inheritance, _var, func_summaries, _modif_summaries) = c.get_summary()
txt += blue('\n+ Contract %s\n' % name)
# (c_name, f_name, visi, _, _, _, _, _) in func_summaries
public = [(elem[0], (elem[1], elem[2])) for elem in func_summaries]
collect = collections.defaultdict(list)
for (a, b) in public:
collect[a].append(b) # depends on [control=['for'], data=[]]
public = list(collect.items())
for (contract, functions) in public:
txt += blue(' - From {}\n'.format(contract))
functions = sorted(functions)
for (function, visi) in functions:
if visi in ['external', 'public']:
txt += green(' - {} ({})\n'.format(function, visi)) # depends on [control=['if'], data=['visi']] # depends on [control=['for'], data=[]]
for (function, visi) in functions:
if visi in ['internal', 'private']:
txt += magenta(' - {} ({})\n'.format(function, visi)) # depends on [control=['if'], data=['visi']] # depends on [control=['for'], data=[]]
for (function, visi) in functions:
if visi not in ['external', 'public', 'internal', 'private']:
txt += ' - {} \xa0({})\n'.format(function, visi) # depends on [control=['if'], data=['visi']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['c']]
self.info(txt)
|
def json_dump(obj, # type: Any
fp, # type: IO[str]
**kwargs # type: Any
): # type: (...) -> None
""" Force use of unicode. """
if six.PY2:
kwargs['encoding'] = 'utf-8'
json.dump(convert_to_dict(obj), fp, **kwargs)
|
def function[json_dump, parameter[obj, fp]]:
constant[ Force use of unicode. ]
if name[six].PY2 begin[:]
call[name[kwargs]][constant[encoding]] assign[=] constant[utf-8]
call[name[json].dump, parameter[call[name[convert_to_dict], parameter[name[obj]]], name[fp]]]
|
keyword[def] identifier[json_dump] ( identifier[obj] ,
identifier[fp] ,
** identifier[kwargs]
):
literal[string]
keyword[if] identifier[six] . identifier[PY2] :
identifier[kwargs] [ literal[string] ]= literal[string]
identifier[json] . identifier[dump] ( identifier[convert_to_dict] ( identifier[obj] ), identifier[fp] ,** identifier[kwargs] )
|
def json_dump(obj, fp, **kwargs): # type: Any
# type: IO[str]
# type: Any
# type: (...) -> None
' Force use of unicode. '
if six.PY2:
kwargs['encoding'] = 'utf-8' # depends on [control=['if'], data=[]]
json.dump(convert_to_dict(obj), fp, **kwargs)
|
def yesno(question, default=None):
"""Asks a yes/no question
Args:
question: string **without** the question mark and without the options.
Example: 'Create links'
default: default option. Accepted values are 'Y', 'YES', 'N', 'NO' or lowercase versions of
these valus (this argument is case-insensitive)
Returns:
bool: True if user answered Yes, False otherwise
"""
if default is not None:
if isinstance(default, bool):
pass
else:
default_ = default.upper()
if default_ not in ('Y', 'YES', 'N', 'NO'):
raise RuntimeError("Invalid default value: '{}'".format(default))
default = default_ in ('Y', 'YES')
while True:
ans = input("{} ({}/{})? ".format(question, "Y" if default == True else "y",
"N" if default == False else "n")).upper()
if ans == "" and default is not None:
ret = default
break
elif ans in ("N", "NO"):
ret = False
break
elif ans in ("Y", "YES"):
ret = True
break
return ret
|
def function[yesno, parameter[question, default]]:
constant[Asks a yes/no question
Args:
question: string **without** the question mark and without the options.
Example: 'Create links'
default: default option. Accepted values are 'Y', 'YES', 'N', 'NO' or lowercase versions of
these valus (this argument is case-insensitive)
Returns:
bool: True if user answered Yes, False otherwise
]
if compare[name[default] is_not constant[None]] begin[:]
if call[name[isinstance], parameter[name[default], name[bool]]] begin[:]
pass
while constant[True] begin[:]
variable[ans] assign[=] call[call[name[input], parameter[call[constant[{} ({}/{})? ].format, parameter[name[question], <ast.IfExp object at 0x7da20e955b70>, <ast.IfExp object at 0x7da20e9556c0>]]]].upper, parameter[]]
if <ast.BoolOp object at 0x7da20e955db0> begin[:]
variable[ret] assign[=] name[default]
break
return[name[ret]]
|
keyword[def] identifier[yesno] ( identifier[question] , identifier[default] = keyword[None] ):
literal[string]
keyword[if] identifier[default] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[default] , identifier[bool] ):
keyword[pass]
keyword[else] :
identifier[default_] = identifier[default] . identifier[upper] ()
keyword[if] identifier[default_] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[default] ))
identifier[default] = identifier[default_] keyword[in] ( literal[string] , literal[string] )
keyword[while] keyword[True] :
identifier[ans] = identifier[input] ( literal[string] . identifier[format] ( identifier[question] , literal[string] keyword[if] identifier[default] == keyword[True] keyword[else] literal[string] ,
literal[string] keyword[if] identifier[default] == keyword[False] keyword[else] literal[string] )). identifier[upper] ()
keyword[if] identifier[ans] == literal[string] keyword[and] identifier[default] keyword[is] keyword[not] keyword[None] :
identifier[ret] = identifier[default]
keyword[break]
keyword[elif] identifier[ans] keyword[in] ( literal[string] , literal[string] ):
identifier[ret] = keyword[False]
keyword[break]
keyword[elif] identifier[ans] keyword[in] ( literal[string] , literal[string] ):
identifier[ret] = keyword[True]
keyword[break]
keyword[return] identifier[ret]
|
def yesno(question, default=None):
"""Asks a yes/no question
Args:
question: string **without** the question mark and without the options.
Example: 'Create links'
default: default option. Accepted values are 'Y', 'YES', 'N', 'NO' or lowercase versions of
these valus (this argument is case-insensitive)
Returns:
bool: True if user answered Yes, False otherwise
"""
if default is not None:
if isinstance(default, bool):
pass # depends on [control=['if'], data=[]]
else:
default_ = default.upper()
if default_ not in ('Y', 'YES', 'N', 'NO'):
raise RuntimeError("Invalid default value: '{}'".format(default)) # depends on [control=['if'], data=[]]
default = default_ in ('Y', 'YES') # depends on [control=['if'], data=['default']]
while True:
ans = input('{} ({}/{})? '.format(question, 'Y' if default == True else 'y', 'N' if default == False else 'n')).upper()
if ans == '' and default is not None:
ret = default
break # depends on [control=['if'], data=[]]
elif ans in ('N', 'NO'):
ret = False
break # depends on [control=['if'], data=[]]
elif ans in ('Y', 'YES'):
ret = True
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return ret
|
def lookup_obj_attribute(self, obj, field):
"""
Looks for a field's value from the passed in obj. Note that this will strip
leading attributes to deal with subelements if possible
"""
curr_field = field.encode('ascii', 'ignore').decode("utf-8")
rest = None
if field.find('.') >= 0:
curr_field = field.split('.')[0]
rest = '.'.join(field.split('.')[1:])
# next up is the object itself
obj_field = getattr(obj, curr_field, None)
# if it is callable, do so
if obj_field and getattr(obj_field, '__call__', None):
obj_field = obj_field()
if obj_field and rest:
return self.lookup_obj_attribute(obj_field, rest)
else:
return obj_field
|
def function[lookup_obj_attribute, parameter[self, obj, field]]:
constant[
Looks for a field's value from the passed in obj. Note that this will strip
leading attributes to deal with subelements if possible
]
variable[curr_field] assign[=] call[call[name[field].encode, parameter[constant[ascii], constant[ignore]]].decode, parameter[constant[utf-8]]]
variable[rest] assign[=] constant[None]
if compare[call[name[field].find, parameter[constant[.]]] greater_or_equal[>=] constant[0]] begin[:]
variable[curr_field] assign[=] call[call[name[field].split, parameter[constant[.]]]][constant[0]]
variable[rest] assign[=] call[constant[.].join, parameter[call[call[name[field].split, parameter[constant[.]]]][<ast.Slice object at 0x7da1b106cc70>]]]
variable[obj_field] assign[=] call[name[getattr], parameter[name[obj], name[curr_field], constant[None]]]
if <ast.BoolOp object at 0x7da1b106dc00> begin[:]
variable[obj_field] assign[=] call[name[obj_field], parameter[]]
if <ast.BoolOp object at 0x7da1b106fc40> begin[:]
return[call[name[self].lookup_obj_attribute, parameter[name[obj_field], name[rest]]]]
|
keyword[def] identifier[lookup_obj_attribute] ( identifier[self] , identifier[obj] , identifier[field] ):
literal[string]
identifier[curr_field] = identifier[field] . identifier[encode] ( literal[string] , literal[string] ). identifier[decode] ( literal[string] )
identifier[rest] = keyword[None]
keyword[if] identifier[field] . identifier[find] ( literal[string] )>= literal[int] :
identifier[curr_field] = identifier[field] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[rest] = literal[string] . identifier[join] ( identifier[field] . identifier[split] ( literal[string] )[ literal[int] :])
identifier[obj_field] = identifier[getattr] ( identifier[obj] , identifier[curr_field] , keyword[None] )
keyword[if] identifier[obj_field] keyword[and] identifier[getattr] ( identifier[obj_field] , literal[string] , keyword[None] ):
identifier[obj_field] = identifier[obj_field] ()
keyword[if] identifier[obj_field] keyword[and] identifier[rest] :
keyword[return] identifier[self] . identifier[lookup_obj_attribute] ( identifier[obj_field] , identifier[rest] )
keyword[else] :
keyword[return] identifier[obj_field]
|
def lookup_obj_attribute(self, obj, field):
"""
Looks for a field's value from the passed in obj. Note that this will strip
leading attributes to deal with subelements if possible
"""
curr_field = field.encode('ascii', 'ignore').decode('utf-8')
rest = None
if field.find('.') >= 0:
curr_field = field.split('.')[0]
rest = '.'.join(field.split('.')[1:]) # depends on [control=['if'], data=[]]
# next up is the object itself
obj_field = getattr(obj, curr_field, None)
# if it is callable, do so
if obj_field and getattr(obj_field, '__call__', None):
obj_field = obj_field() # depends on [control=['if'], data=[]]
if obj_field and rest:
return self.lookup_obj_attribute(obj_field, rest) # depends on [control=['if'], data=[]]
else:
return obj_field
|
def get_incomplete_penetrance_genes(hpo_lines):
"""Get a set with all genes that have incomplete penetrance according to HPO
Args:
hpo_lines(iterable(str))
Returns:
incomplete_penetrance_genes(set): A set with the hgnc symbols of all
genes with incomplete penetrance
"""
genes = parse_hpo_genes(hpo_lines)
incomplete_penetrance_genes = set()
for hgnc_symbol in genes:
if genes[hgnc_symbol].get('incomplete_penetrance'):
incomplete_penetrance_genes.add(hgnc_symbol)
return incomplete_penetrance_genes
|
def function[get_incomplete_penetrance_genes, parameter[hpo_lines]]:
constant[Get a set with all genes that have incomplete penetrance according to HPO
Args:
hpo_lines(iterable(str))
Returns:
incomplete_penetrance_genes(set): A set with the hgnc symbols of all
genes with incomplete penetrance
]
variable[genes] assign[=] call[name[parse_hpo_genes], parameter[name[hpo_lines]]]
variable[incomplete_penetrance_genes] assign[=] call[name[set], parameter[]]
for taget[name[hgnc_symbol]] in starred[name[genes]] begin[:]
if call[call[name[genes]][name[hgnc_symbol]].get, parameter[constant[incomplete_penetrance]]] begin[:]
call[name[incomplete_penetrance_genes].add, parameter[name[hgnc_symbol]]]
return[name[incomplete_penetrance_genes]]
|
keyword[def] identifier[get_incomplete_penetrance_genes] ( identifier[hpo_lines] ):
literal[string]
identifier[genes] = identifier[parse_hpo_genes] ( identifier[hpo_lines] )
identifier[incomplete_penetrance_genes] = identifier[set] ()
keyword[for] identifier[hgnc_symbol] keyword[in] identifier[genes] :
keyword[if] identifier[genes] [ identifier[hgnc_symbol] ]. identifier[get] ( literal[string] ):
identifier[incomplete_penetrance_genes] . identifier[add] ( identifier[hgnc_symbol] )
keyword[return] identifier[incomplete_penetrance_genes]
|
def get_incomplete_penetrance_genes(hpo_lines):
"""Get a set with all genes that have incomplete penetrance according to HPO
Args:
hpo_lines(iterable(str))
Returns:
incomplete_penetrance_genes(set): A set with the hgnc symbols of all
genes with incomplete penetrance
"""
genes = parse_hpo_genes(hpo_lines)
incomplete_penetrance_genes = set()
for hgnc_symbol in genes:
if genes[hgnc_symbol].get('incomplete_penetrance'):
incomplete_penetrance_genes.add(hgnc_symbol) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['hgnc_symbol']]
return incomplete_penetrance_genes
|
def repvals(self, method):
"""Compute representative statistical values for this Uval. `method`
may be either 'pct' or 'gauss'.
Returns (best, plus_one_sigma, minus_one_sigma), where `best` is the
"best" value in some sense, and the others correspond to values at
the ~84 and 16 percentile limits, respectively. Because of the
sampled nature of the Uval system, there is no single method to
compute these numbers.
The "pct" method returns the 50th, 15.866th, and 84.134th percentile
values.
The "gauss" method computes the mean μ and standard deviation σ of the
samples and returns [μ, μ+σ, μ-σ].
"""
if method == 'pct':
return pk_scoreatpercentile(self.d, [50., 84.134, 15.866])
if method == 'gauss':
m, s = self.d.mean(), self.d.std()
return np.asarray([m, m + s, m - s])
raise ValueError('unknown representative-value method "%s"' % method)
|
def function[repvals, parameter[self, method]]:
constant[Compute representative statistical values for this Uval. `method`
may be either 'pct' or 'gauss'.
Returns (best, plus_one_sigma, minus_one_sigma), where `best` is the
"best" value in some sense, and the others correspond to values at
the ~84 and 16 percentile limits, respectively. Because of the
sampled nature of the Uval system, there is no single method to
compute these numbers.
The "pct" method returns the 50th, 15.866th, and 84.134th percentile
values.
The "gauss" method computes the mean μ and standard deviation σ of the
samples and returns [μ, μ+σ, μ-σ].
]
if compare[name[method] equal[==] constant[pct]] begin[:]
return[call[name[pk_scoreatpercentile], parameter[name[self].d, list[[<ast.Constant object at 0x7da1b2724040>, <ast.Constant object at 0x7da1b2724250>, <ast.Constant object at 0x7da1b2726b60>]]]]]
if compare[name[method] equal[==] constant[gauss]] begin[:]
<ast.Tuple object at 0x7da1b27270d0> assign[=] tuple[[<ast.Call object at 0x7da1b2726e60>, <ast.Call object at 0x7da1b2726e00>]]
return[call[name[np].asarray, parameter[list[[<ast.Name object at 0x7da1b2727700>, <ast.BinOp object at 0x7da1b27275e0>, <ast.BinOp object at 0x7da1b2727250>]]]]]
<ast.Raise object at 0x7da1b2726200>
|
keyword[def] identifier[repvals] ( identifier[self] , identifier[method] ):
literal[string]
keyword[if] identifier[method] == literal[string] :
keyword[return] identifier[pk_scoreatpercentile] ( identifier[self] . identifier[d] ,[ literal[int] , literal[int] , literal[int] ])
keyword[if] identifier[method] == literal[string] :
identifier[m] , identifier[s] = identifier[self] . identifier[d] . identifier[mean] (), identifier[self] . identifier[d] . identifier[std] ()
keyword[return] identifier[np] . identifier[asarray] ([ identifier[m] , identifier[m] + identifier[s] , identifier[m] - identifier[s] ])
keyword[raise] identifier[ValueError] ( literal[string] % identifier[method] )
|
def repvals(self, method):
"""Compute representative statistical values for this Uval. `method`
may be either 'pct' or 'gauss'.
Returns (best, plus_one_sigma, minus_one_sigma), where `best` is the
"best" value in some sense, and the others correspond to values at
the ~84 and 16 percentile limits, respectively. Because of the
sampled nature of the Uval system, there is no single method to
compute these numbers.
The "pct" method returns the 50th, 15.866th, and 84.134th percentile
values.
The "gauss" method computes the mean μ and standard deviation σ of the
samples and returns [μ, μ+σ, μ-σ].
"""
if method == 'pct':
return pk_scoreatpercentile(self.d, [50.0, 84.134, 15.866]) # depends on [control=['if'], data=[]]
if method == 'gauss':
(m, s) = (self.d.mean(), self.d.std())
return np.asarray([m, m + s, m - s]) # depends on [control=['if'], data=[]]
raise ValueError('unknown representative-value method "%s"' % method)
|
def get_legend_text(obj):
"""Check if line is in legend.
"""
leg = obj.axes.get_legend()
if leg is None:
return None
keys = [l.get_label() for l in leg.legendHandles if l is not None]
values = [l.get_text() for l in leg.texts]
label = obj.get_label()
d = dict(zip(keys, values))
if label in d:
return d[label]
return None
|
def function[get_legend_text, parameter[obj]]:
constant[Check if line is in legend.
]
variable[leg] assign[=] call[name[obj].axes.get_legend, parameter[]]
if compare[name[leg] is constant[None]] begin[:]
return[constant[None]]
variable[keys] assign[=] <ast.ListComp object at 0x7da18bc736a0>
variable[values] assign[=] <ast.ListComp object at 0x7da18bc72cb0>
variable[label] assign[=] call[name[obj].get_label, parameter[]]
variable[d] assign[=] call[name[dict], parameter[call[name[zip], parameter[name[keys], name[values]]]]]
if compare[name[label] in name[d]] begin[:]
return[call[name[d]][name[label]]]
return[constant[None]]
|
keyword[def] identifier[get_legend_text] ( identifier[obj] ):
literal[string]
identifier[leg] = identifier[obj] . identifier[axes] . identifier[get_legend] ()
keyword[if] identifier[leg] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[keys] =[ identifier[l] . identifier[get_label] () keyword[for] identifier[l] keyword[in] identifier[leg] . identifier[legendHandles] keyword[if] identifier[l] keyword[is] keyword[not] keyword[None] ]
identifier[values] =[ identifier[l] . identifier[get_text] () keyword[for] identifier[l] keyword[in] identifier[leg] . identifier[texts] ]
identifier[label] = identifier[obj] . identifier[get_label] ()
identifier[d] = identifier[dict] ( identifier[zip] ( identifier[keys] , identifier[values] ))
keyword[if] identifier[label] keyword[in] identifier[d] :
keyword[return] identifier[d] [ identifier[label] ]
keyword[return] keyword[None]
|
def get_legend_text(obj):
"""Check if line is in legend.
"""
leg = obj.axes.get_legend()
if leg is None:
return None # depends on [control=['if'], data=[]]
keys = [l.get_label() for l in leg.legendHandles if l is not None]
values = [l.get_text() for l in leg.texts]
label = obj.get_label()
d = dict(zip(keys, values))
if label in d:
return d[label] # depends on [control=['if'], data=['label', 'd']]
return None
|
def parse_bytes(self, i):
"""Parse byte."""
value = int(self.get_byte(i), 16)
single = self.get_single_stack()
if self.span_stack:
text = self.convert_case(chr(value), self.span_stack[-1])
value = ord(self.convert_case(text, single)) if single is not None else ord(text)
elif single:
value = ord(self.convert_case(chr(value), single))
if self.use_format and value in _CURLY_BRACKETS_ORD:
self.handle_format(chr(value), i)
else:
self.result.append('\\%03o' % value)
|
def function[parse_bytes, parameter[self, i]]:
constant[Parse byte.]
variable[value] assign[=] call[name[int], parameter[call[name[self].get_byte, parameter[name[i]]], constant[16]]]
variable[single] assign[=] call[name[self].get_single_stack, parameter[]]
if name[self].span_stack begin[:]
variable[text] assign[=] call[name[self].convert_case, parameter[call[name[chr], parameter[name[value]]], call[name[self].span_stack][<ast.UnaryOp object at 0x7da1b04ef820>]]]
variable[value] assign[=] <ast.IfExp object at 0x7da1b04ee230>
if <ast.BoolOp object at 0x7da1b036d5d0> begin[:]
call[name[self].handle_format, parameter[call[name[chr], parameter[name[value]]], name[i]]]
|
keyword[def] identifier[parse_bytes] ( identifier[self] , identifier[i] ):
literal[string]
identifier[value] = identifier[int] ( identifier[self] . identifier[get_byte] ( identifier[i] ), literal[int] )
identifier[single] = identifier[self] . identifier[get_single_stack] ()
keyword[if] identifier[self] . identifier[span_stack] :
identifier[text] = identifier[self] . identifier[convert_case] ( identifier[chr] ( identifier[value] ), identifier[self] . identifier[span_stack] [- literal[int] ])
identifier[value] = identifier[ord] ( identifier[self] . identifier[convert_case] ( identifier[text] , identifier[single] )) keyword[if] identifier[single] keyword[is] keyword[not] keyword[None] keyword[else] identifier[ord] ( identifier[text] )
keyword[elif] identifier[single] :
identifier[value] = identifier[ord] ( identifier[self] . identifier[convert_case] ( identifier[chr] ( identifier[value] ), identifier[single] ))
keyword[if] identifier[self] . identifier[use_format] keyword[and] identifier[value] keyword[in] identifier[_CURLY_BRACKETS_ORD] :
identifier[self] . identifier[handle_format] ( identifier[chr] ( identifier[value] ), identifier[i] )
keyword[else] :
identifier[self] . identifier[result] . identifier[append] ( literal[string] % identifier[value] )
|
def parse_bytes(self, i):
"""Parse byte."""
value = int(self.get_byte(i), 16)
single = self.get_single_stack()
if self.span_stack:
text = self.convert_case(chr(value), self.span_stack[-1])
value = ord(self.convert_case(text, single)) if single is not None else ord(text) # depends on [control=['if'], data=[]]
elif single:
value = ord(self.convert_case(chr(value), single)) # depends on [control=['if'], data=[]]
if self.use_format and value in _CURLY_BRACKETS_ORD:
self.handle_format(chr(value), i) # depends on [control=['if'], data=[]]
else:
self.result.append('\\%03o' % value)
|
def _parse_feature_value(self, value):
""" Checks if value fits the feature type. If not it tries to fix it or raise an error
:raises: ValueError
"""
if isinstance(value, _FileLoader):
return value
if not hasattr(self, 'ndim'): # Because of serialization/deserialization during multiprocessing
return value
if self.ndim:
if not isinstance(value, np.ndarray):
raise ValueError('{} feature has to be a numpy array'.format(self.feature_type))
if value.ndim != self.ndim:
raise ValueError('Numpy array of {} feature has to have {} '
'dimension{}'.format(self.feature_type, self.ndim, 's' if self.ndim > 1 else ''))
if self.feature_type.is_discrete():
if not issubclass(value.dtype.type, (np.integer, np.bool, np.bool_, np.bool8)):
msg = '{} is a discrete feature type therefore dtype of data should be a subtype of ' \
'numpy.integer or numpy.bool, found type {}. In the future an error will be raised because' \
'of this'.format(self.feature_type, value.dtype.type)
warnings.warn(msg, DeprecationWarning, stacklevel=3)
# raise ValueError('{} is a discrete feature type therefore dtype of data has to be a subtype of '
# 'numpy.integer or numpy.bool, found type {}'.format(self.feature_type,
# value.dtype.type))
# This checking is disabled for now
# else:
# if not issubclass(value.dtype.type, (np.floating, np.float)):
# raise ValueError('{} is a floating feature type therefore dtype of data has to be a subtype of '
# 'numpy.floating or numpy.float, found type {}'.format(self.feature_type,
# value.dtype.type))
return value
if self.is_vector:
if isinstance(value, gpd.GeoSeries):
value = gpd.GeoDataFrame(dict(geometry=value), crs=value.crs)
if isinstance(value, gpd.GeoDataFrame):
if self.feature_type is FeatureType.VECTOR:
if FeatureType.TIMESTAMP.value.upper() not in value:
raise ValueError("{} feature has to contain a column 'TIMESTAMP' with "
"timestamps".format(self.feature_type))
return value
raise ValueError('{} feature works with data of type {}, parsing data type {} is not supported'
'given'.format(self.feature_type, gpd.GeoDataFrame.__name__, type(value)))
return value
|
def function[_parse_feature_value, parameter[self, value]]:
constant[ Checks if value fits the feature type. If not it tries to fix it or raise an error
:raises: ValueError
]
if call[name[isinstance], parameter[name[value], name[_FileLoader]]] begin[:]
return[name[value]]
if <ast.UnaryOp object at 0x7da1b26ad870> begin[:]
return[name[value]]
if name[self].ndim begin[:]
if <ast.UnaryOp object at 0x7da1b26ad4e0> begin[:]
<ast.Raise object at 0x7da1b26aca00>
if compare[name[value].ndim not_equal[!=] name[self].ndim] begin[:]
<ast.Raise object at 0x7da1b26afa90>
if call[name[self].feature_type.is_discrete, parameter[]] begin[:]
if <ast.UnaryOp object at 0x7da1b26aeda0> begin[:]
variable[msg] assign[=] call[constant[{} is a discrete feature type therefore dtype of data should be a subtype of numpy.integer or numpy.bool, found type {}. In the future an error will be raised becauseof this].format, parameter[name[self].feature_type, name[value].dtype.type]]
call[name[warnings].warn, parameter[name[msg], name[DeprecationWarning]]]
return[name[value]]
if name[self].is_vector begin[:]
if call[name[isinstance], parameter[name[value], name[gpd].GeoSeries]] begin[:]
variable[value] assign[=] call[name[gpd].GeoDataFrame, parameter[call[name[dict], parameter[]]]]
if call[name[isinstance], parameter[name[value], name[gpd].GeoDataFrame]] begin[:]
if compare[name[self].feature_type is name[FeatureType].VECTOR] begin[:]
if compare[call[name[FeatureType].TIMESTAMP.value.upper, parameter[]] <ast.NotIn object at 0x7da2590d7190> name[value]] begin[:]
<ast.Raise object at 0x7da20c7c91e0>
return[name[value]]
<ast.Raise object at 0x7da20c7cac20>
return[name[value]]
|
keyword[def] identifier[_parse_feature_value] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[_FileLoader] ):
keyword[return] identifier[value]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[return] identifier[value]
keyword[if] identifier[self] . identifier[ndim] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[np] . identifier[ndarray] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[self] . identifier[feature_type] ))
keyword[if] identifier[value] . identifier[ndim] != identifier[self] . identifier[ndim] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[feature_type] , identifier[self] . identifier[ndim] , literal[string] keyword[if] identifier[self] . identifier[ndim] > literal[int] keyword[else] literal[string] ))
keyword[if] identifier[self] . identifier[feature_type] . identifier[is_discrete] ():
keyword[if] keyword[not] identifier[issubclass] ( identifier[value] . identifier[dtype] . identifier[type] ,( identifier[np] . identifier[integer] , identifier[np] . identifier[bool] , identifier[np] . identifier[bool_] , identifier[np] . identifier[bool8] )):
identifier[msg] = literal[string] literal[string] literal[string] . identifier[format] ( identifier[self] . identifier[feature_type] , identifier[value] . identifier[dtype] . identifier[type] )
identifier[warnings] . identifier[warn] ( identifier[msg] , identifier[DeprecationWarning] , identifier[stacklevel] = literal[int] )
keyword[return] identifier[value]
keyword[if] identifier[self] . identifier[is_vector] :
keyword[if] identifier[isinstance] ( identifier[value] , identifier[gpd] . identifier[GeoSeries] ):
identifier[value] = identifier[gpd] . identifier[GeoDataFrame] ( identifier[dict] ( identifier[geometry] = identifier[value] ), identifier[crs] = identifier[value] . identifier[crs] )
keyword[if] identifier[isinstance] ( identifier[value] , identifier[gpd] . identifier[GeoDataFrame] ):
keyword[if] identifier[self] . identifier[feature_type] keyword[is] identifier[FeatureType] . identifier[VECTOR] :
keyword[if] identifier[FeatureType] . identifier[TIMESTAMP] . identifier[value] . identifier[upper] () keyword[not] keyword[in] identifier[value] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[feature_type] ))
keyword[return] identifier[value]
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[feature_type] , identifier[gpd] . identifier[GeoDataFrame] . identifier[__name__] , identifier[type] ( identifier[value] )))
keyword[return] identifier[value]
|
def _parse_feature_value(self, value):
""" Checks if value fits the feature type. If not it tries to fix it or raise an error
:raises: ValueError
"""
if isinstance(value, _FileLoader):
return value # depends on [control=['if'], data=[]]
if not hasattr(self, 'ndim'): # Because of serialization/deserialization during multiprocessing
return value # depends on [control=['if'], data=[]]
if self.ndim:
if not isinstance(value, np.ndarray):
raise ValueError('{} feature has to be a numpy array'.format(self.feature_type)) # depends on [control=['if'], data=[]]
if value.ndim != self.ndim:
raise ValueError('Numpy array of {} feature has to have {} dimension{}'.format(self.feature_type, self.ndim, 's' if self.ndim > 1 else '')) # depends on [control=['if'], data=[]]
if self.feature_type.is_discrete():
if not issubclass(value.dtype.type, (np.integer, np.bool, np.bool_, np.bool8)):
msg = '{} is a discrete feature type therefore dtype of data should be a subtype of numpy.integer or numpy.bool, found type {}. In the future an error will be raised becauseof this'.format(self.feature_type, value.dtype.type)
warnings.warn(msg, DeprecationWarning, stacklevel=3) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # raise ValueError('{} is a discrete feature type therefore dtype of data has to be a subtype of '
# 'numpy.integer or numpy.bool, found type {}'.format(self.feature_type,
# value.dtype.type))
# This checking is disabled for now
# else:
# if not issubclass(value.dtype.type, (np.floating, np.float)):
# raise ValueError('{} is a floating feature type therefore dtype of data has to be a subtype of '
# 'numpy.floating or numpy.float, found type {}'.format(self.feature_type,
# value.dtype.type))
return value # depends on [control=['if'], data=[]]
if self.is_vector:
if isinstance(value, gpd.GeoSeries):
value = gpd.GeoDataFrame(dict(geometry=value), crs=value.crs) # depends on [control=['if'], data=[]]
if isinstance(value, gpd.GeoDataFrame):
if self.feature_type is FeatureType.VECTOR:
if FeatureType.TIMESTAMP.value.upper() not in value:
raise ValueError("{} feature has to contain a column 'TIMESTAMP' with timestamps".format(self.feature_type)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return value # depends on [control=['if'], data=[]]
raise ValueError('{} feature works with data of type {}, parsing data type {} is not supportedgiven'.format(self.feature_type, gpd.GeoDataFrame.__name__, type(value))) # depends on [control=['if'], data=[]]
return value
|
def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
'''
Remove packages using ``opkg remove``.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
remove_dependencies
Remove package and all dependencies
.. versionadded:: 2019.2.0
auto_remove_deps
Remove packages that were installed automatically to satisfy dependencies
.. versionadded:: 2019.2.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
salt '*' pkg.remove pkgs='["foo", "bar"]' remove_dependencies=True auto_remove_deps=True
'''
try:
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0]
except MinionError as exc:
raise CommandExecutionError(exc)
old = list_pkgs()
targets = [x for x in pkg_params if x in old]
if not targets:
return {}
cmd = ['opkg', 'remove']
_append_noaction_if_testmode(cmd, **kwargs)
if kwargs.get('remove_dependencies', False):
cmd.append('--force-removal-of-dependent-packages')
if kwargs.get('auto_remove_deps', False):
cmd.append('--autoremove')
cmd.extend(targets)
out = __salt__['cmd.run_all'](
cmd,
output_loglevel='trace',
python_shell=False
)
if out['retcode'] != 0:
if out['stderr']:
errors = [out['stderr']]
else:
errors = [out['stdout']]
else:
errors = []
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
if _is_testmode(**kwargs):
reportedPkgs = _parse_reported_packages_from_remove_output(out['stdout'])
new = {k: v for k, v in new.items() if k not in reportedPkgs}
ret = salt.utils.data.compare_dicts(old, new)
rs_result = _get_restartcheck_result(errors)
if errors:
raise CommandExecutionError(
'Problem encountered removing package(s)',
info={'errors': errors, 'changes': ret}
)
_process_restartcheck_result(rs_result, **kwargs)
return ret
|
def function[remove, parameter[name, pkgs]]:
constant[
Remove packages using ``opkg remove``.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
remove_dependencies
Remove package and all dependencies
.. versionadded:: 2019.2.0
auto_remove_deps
Remove packages that were installed automatically to satisfy dependencies
.. versionadded:: 2019.2.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
salt '*' pkg.remove pkgs='["foo", "bar"]' remove_dependencies=True auto_remove_deps=True
]
<ast.Try object at 0x7da20c6c4490>
variable[old] assign[=] call[name[list_pkgs], parameter[]]
variable[targets] assign[=] <ast.ListComp object at 0x7da20c6c77c0>
if <ast.UnaryOp object at 0x7da20c6c4460> begin[:]
return[dictionary[[], []]]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da20c6c7a30>, <ast.Constant object at 0x7da20c6c7040>]]
call[name[_append_noaction_if_testmode], parameter[name[cmd]]]
if call[name[kwargs].get, parameter[constant[remove_dependencies], constant[False]]] begin[:]
call[name[cmd].append, parameter[constant[--force-removal-of-dependent-packages]]]
if call[name[kwargs].get, parameter[constant[auto_remove_deps], constant[False]]] begin[:]
call[name[cmd].append, parameter[constant[--autoremove]]]
call[name[cmd].extend, parameter[name[targets]]]
variable[out] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]]
if compare[call[name[out]][constant[retcode]] not_equal[!=] constant[0]] begin[:]
if call[name[out]][constant[stderr]] begin[:]
variable[errors] assign[=] list[[<ast.Subscript object at 0x7da20c6c6fe0>]]
call[name[__context__].pop, parameter[constant[pkg.list_pkgs], constant[None]]]
variable[new] assign[=] call[name[list_pkgs], parameter[]]
if call[name[_is_testmode], parameter[]] begin[:]
variable[reportedPkgs] assign[=] call[name[_parse_reported_packages_from_remove_output], parameter[call[name[out]][constant[stdout]]]]
variable[new] assign[=] <ast.DictComp object at 0x7da20c6c6f20>
variable[ret] assign[=] call[name[salt].utils.data.compare_dicts, parameter[name[old], name[new]]]
variable[rs_result] assign[=] call[name[_get_restartcheck_result], parameter[name[errors]]]
if name[errors] begin[:]
<ast.Raise object at 0x7da20c6c4c40>
call[name[_process_restartcheck_result], parameter[name[rs_result]]]
return[name[ret]]
|
keyword[def] identifier[remove] ( identifier[name] = keyword[None] , identifier[pkgs] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
identifier[pkg_params] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[pkgs] )[ literal[int] ]
keyword[except] identifier[MinionError] keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] ( identifier[exc] )
identifier[old] = identifier[list_pkgs] ()
identifier[targets] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[pkg_params] keyword[if] identifier[x] keyword[in] identifier[old] ]
keyword[if] keyword[not] identifier[targets] :
keyword[return] {}
identifier[cmd] =[ literal[string] , literal[string] ]
identifier[_append_noaction_if_testmode] ( identifier[cmd] ,** identifier[kwargs] )
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ):
identifier[cmd] . identifier[append] ( literal[string] )
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ):
identifier[cmd] . identifier[append] ( literal[string] )
identifier[cmd] . identifier[extend] ( identifier[targets] )
identifier[out] = identifier[__salt__] [ literal[string] ](
identifier[cmd] ,
identifier[output_loglevel] = literal[string] ,
identifier[python_shell] = keyword[False]
)
keyword[if] identifier[out] [ literal[string] ]!= literal[int] :
keyword[if] identifier[out] [ literal[string] ]:
identifier[errors] =[ identifier[out] [ literal[string] ]]
keyword[else] :
identifier[errors] =[ identifier[out] [ literal[string] ]]
keyword[else] :
identifier[errors] =[]
identifier[__context__] . identifier[pop] ( literal[string] , keyword[None] )
identifier[new] = identifier[list_pkgs] ()
keyword[if] identifier[_is_testmode] (** identifier[kwargs] ):
identifier[reportedPkgs] = identifier[_parse_reported_packages_from_remove_output] ( identifier[out] [ literal[string] ])
identifier[new] ={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[new] . identifier[items] () keyword[if] identifier[k] keyword[not] keyword[in] identifier[reportedPkgs] }
identifier[ret] = identifier[salt] . identifier[utils] . identifier[data] . identifier[compare_dicts] ( identifier[old] , identifier[new] )
identifier[rs_result] = identifier[_get_restartcheck_result] ( identifier[errors] )
keyword[if] identifier[errors] :
keyword[raise] identifier[CommandExecutionError] (
literal[string] ,
identifier[info] ={ literal[string] : identifier[errors] , literal[string] : identifier[ret] }
)
identifier[_process_restartcheck_result] ( identifier[rs_result] ,** identifier[kwargs] )
keyword[return] identifier[ret]
|
def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
'\n Remove packages using ``opkg remove``.\n\n name\n The name of the package to be deleted.\n\n\n Multiple Package Options:\n\n pkgs\n A list of packages to delete. Must be passed as a python list. The\n ``name`` parameter will be ignored if this option is passed.\n\n remove_dependencies\n Remove package and all dependencies\n\n .. versionadded:: 2019.2.0\n\n auto_remove_deps\n Remove packages that were installed automatically to satisfy dependencies\n\n .. versionadded:: 2019.2.0\n\n Returns a dict containing the changes.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt \'*\' pkg.remove <package name>\n salt \'*\' pkg.remove <package1>,<package2>,<package3>\n salt \'*\' pkg.remove pkgs=\'["foo", "bar"]\'\n salt \'*\' pkg.remove pkgs=\'["foo", "bar"]\' remove_dependencies=True auto_remove_deps=True\n '
try:
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0] # depends on [control=['try'], data=[]]
except MinionError as exc:
raise CommandExecutionError(exc) # depends on [control=['except'], data=['exc']]
old = list_pkgs()
targets = [x for x in pkg_params if x in old]
if not targets:
return {} # depends on [control=['if'], data=[]]
cmd = ['opkg', 'remove']
_append_noaction_if_testmode(cmd, **kwargs)
if kwargs.get('remove_dependencies', False):
cmd.append('--force-removal-of-dependent-packages') # depends on [control=['if'], data=[]]
if kwargs.get('auto_remove_deps', False):
cmd.append('--autoremove') # depends on [control=['if'], data=[]]
cmd.extend(targets)
out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False)
if out['retcode'] != 0:
if out['stderr']:
errors = [out['stderr']] # depends on [control=['if'], data=[]]
else:
errors = [out['stdout']] # depends on [control=['if'], data=[]]
else:
errors = []
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
if _is_testmode(**kwargs):
reportedPkgs = _parse_reported_packages_from_remove_output(out['stdout'])
new = {k: v for (k, v) in new.items() if k not in reportedPkgs} # depends on [control=['if'], data=[]]
ret = salt.utils.data.compare_dicts(old, new)
rs_result = _get_restartcheck_result(errors)
if errors:
raise CommandExecutionError('Problem encountered removing package(s)', info={'errors': errors, 'changes': ret}) # depends on [control=['if'], data=[]]
_process_restartcheck_result(rs_result, **kwargs)
return ret
|
def _do_leave(self, leave, in_port, msg):
"""the process when the querier received a LEAVE message."""
datapath = msg.datapath
parser = datapath.ofproto_parser
self._mcast.setdefault(leave.address, {})
if in_port in self._mcast[leave.address]:
self._del_flow_entry(
datapath, in_port, leave.address)
del self._mcast[leave.address][in_port]
actions = []
for port in self._mcast[leave.address]:
actions.append(parser.OFPActionOutput(port))
if len(actions):
self._set_flow_entry(
datapath, actions, self.server_port, leave.address)
else:
self._del_flow_entry(
datapath, self.server_port, leave.address)
|
def function[_do_leave, parameter[self, leave, in_port, msg]]:
constant[the process when the querier received a LEAVE message.]
variable[datapath] assign[=] name[msg].datapath
variable[parser] assign[=] name[datapath].ofproto_parser
call[name[self]._mcast.setdefault, parameter[name[leave].address, dictionary[[], []]]]
if compare[name[in_port] in call[name[self]._mcast][name[leave].address]] begin[:]
call[name[self]._del_flow_entry, parameter[name[datapath], name[in_port], name[leave].address]]
<ast.Delete object at 0x7da1b1a223b0>
variable[actions] assign[=] list[[]]
for taget[name[port]] in starred[call[name[self]._mcast][name[leave].address]] begin[:]
call[name[actions].append, parameter[call[name[parser].OFPActionOutput, parameter[name[port]]]]]
if call[name[len], parameter[name[actions]]] begin[:]
call[name[self]._set_flow_entry, parameter[name[datapath], name[actions], name[self].server_port, name[leave].address]]
|
keyword[def] identifier[_do_leave] ( identifier[self] , identifier[leave] , identifier[in_port] , identifier[msg] ):
literal[string]
identifier[datapath] = identifier[msg] . identifier[datapath]
identifier[parser] = identifier[datapath] . identifier[ofproto_parser]
identifier[self] . identifier[_mcast] . identifier[setdefault] ( identifier[leave] . identifier[address] ,{})
keyword[if] identifier[in_port] keyword[in] identifier[self] . identifier[_mcast] [ identifier[leave] . identifier[address] ]:
identifier[self] . identifier[_del_flow_entry] (
identifier[datapath] , identifier[in_port] , identifier[leave] . identifier[address] )
keyword[del] identifier[self] . identifier[_mcast] [ identifier[leave] . identifier[address] ][ identifier[in_port] ]
identifier[actions] =[]
keyword[for] identifier[port] keyword[in] identifier[self] . identifier[_mcast] [ identifier[leave] . identifier[address] ]:
identifier[actions] . identifier[append] ( identifier[parser] . identifier[OFPActionOutput] ( identifier[port] ))
keyword[if] identifier[len] ( identifier[actions] ):
identifier[self] . identifier[_set_flow_entry] (
identifier[datapath] , identifier[actions] , identifier[self] . identifier[server_port] , identifier[leave] . identifier[address] )
keyword[else] :
identifier[self] . identifier[_del_flow_entry] (
identifier[datapath] , identifier[self] . identifier[server_port] , identifier[leave] . identifier[address] )
|
def _do_leave(self, leave, in_port, msg):
"""the process when the querier received a LEAVE message."""
datapath = msg.datapath
parser = datapath.ofproto_parser
self._mcast.setdefault(leave.address, {})
if in_port in self._mcast[leave.address]:
self._del_flow_entry(datapath, in_port, leave.address)
del self._mcast[leave.address][in_port]
actions = []
for port in self._mcast[leave.address]:
actions.append(parser.OFPActionOutput(port)) # depends on [control=['for'], data=['port']]
if len(actions):
self._set_flow_entry(datapath, actions, self.server_port, leave.address) # depends on [control=['if'], data=[]]
else:
self._del_flow_entry(datapath, self.server_port, leave.address) # depends on [control=['if'], data=['in_port']]
|
def lower_context_field_existence(ir_blocks, query_metadata_table):
"""Lower ContextFieldExistence expressions into lower-level expressions."""
def regular_visitor_fn(expression):
"""Expression visitor function that rewrites ContextFieldExistence expressions."""
if not isinstance(expression, ContextFieldExistence):
return expression
location_type = query_metadata_table.get_location_info(expression.location).type
# Since this function is only used in blocks that aren't ConstructResult,
# the location check is performed using a regular ContextField expression.
return BinaryComposition(
u'!=',
ContextField(expression.location, location_type),
NullLiteral)
def construct_result_visitor_fn(expression):
"""Expression visitor function that rewrites ContextFieldExistence expressions."""
if not isinstance(expression, ContextFieldExistence):
return expression
location_type = query_metadata_table.get_location_info(expression.location).type
# Since this function is only used in ConstructResult blocks,
# the location check is performed using the special OutputContextVertex expression.
return BinaryComposition(
u'!=',
OutputContextVertex(expression.location, location_type),
NullLiteral)
new_ir_blocks = []
for block in ir_blocks:
new_block = None
if isinstance(block, ConstructResult):
new_block = block.visit_and_update_expressions(construct_result_visitor_fn)
else:
new_block = block.visit_and_update_expressions(regular_visitor_fn)
new_ir_blocks.append(new_block)
return new_ir_blocks
|
def function[lower_context_field_existence, parameter[ir_blocks, query_metadata_table]]:
constant[Lower ContextFieldExistence expressions into lower-level expressions.]
def function[regular_visitor_fn, parameter[expression]]:
constant[Expression visitor function that rewrites ContextFieldExistence expressions.]
if <ast.UnaryOp object at 0x7da18dc9b550> begin[:]
return[name[expression]]
variable[location_type] assign[=] call[name[query_metadata_table].get_location_info, parameter[name[expression].location]].type
return[call[name[BinaryComposition], parameter[constant[!=], call[name[ContextField], parameter[name[expression].location, name[location_type]]], name[NullLiteral]]]]
def function[construct_result_visitor_fn, parameter[expression]]:
constant[Expression visitor function that rewrites ContextFieldExistence expressions.]
if <ast.UnaryOp object at 0x7da1b1724400> begin[:]
return[name[expression]]
variable[location_type] assign[=] call[name[query_metadata_table].get_location_info, parameter[name[expression].location]].type
return[call[name[BinaryComposition], parameter[constant[!=], call[name[OutputContextVertex], parameter[name[expression].location, name[location_type]]], name[NullLiteral]]]]
variable[new_ir_blocks] assign[=] list[[]]
for taget[name[block]] in starred[name[ir_blocks]] begin[:]
variable[new_block] assign[=] constant[None]
if call[name[isinstance], parameter[name[block], name[ConstructResult]]] begin[:]
variable[new_block] assign[=] call[name[block].visit_and_update_expressions, parameter[name[construct_result_visitor_fn]]]
call[name[new_ir_blocks].append, parameter[name[new_block]]]
return[name[new_ir_blocks]]
|
keyword[def] identifier[lower_context_field_existence] ( identifier[ir_blocks] , identifier[query_metadata_table] ):
literal[string]
keyword[def] identifier[regular_visitor_fn] ( identifier[expression] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[expression] , identifier[ContextFieldExistence] ):
keyword[return] identifier[expression]
identifier[location_type] = identifier[query_metadata_table] . identifier[get_location_info] ( identifier[expression] . identifier[location] ). identifier[type]
keyword[return] identifier[BinaryComposition] (
literal[string] ,
identifier[ContextField] ( identifier[expression] . identifier[location] , identifier[location_type] ),
identifier[NullLiteral] )
keyword[def] identifier[construct_result_visitor_fn] ( identifier[expression] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[expression] , identifier[ContextFieldExistence] ):
keyword[return] identifier[expression]
identifier[location_type] = identifier[query_metadata_table] . identifier[get_location_info] ( identifier[expression] . identifier[location] ). identifier[type]
keyword[return] identifier[BinaryComposition] (
literal[string] ,
identifier[OutputContextVertex] ( identifier[expression] . identifier[location] , identifier[location_type] ),
identifier[NullLiteral] )
identifier[new_ir_blocks] =[]
keyword[for] identifier[block] keyword[in] identifier[ir_blocks] :
identifier[new_block] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[block] , identifier[ConstructResult] ):
identifier[new_block] = identifier[block] . identifier[visit_and_update_expressions] ( identifier[construct_result_visitor_fn] )
keyword[else] :
identifier[new_block] = identifier[block] . identifier[visit_and_update_expressions] ( identifier[regular_visitor_fn] )
identifier[new_ir_blocks] . identifier[append] ( identifier[new_block] )
keyword[return] identifier[new_ir_blocks]
|
def lower_context_field_existence(ir_blocks, query_metadata_table):
"""Lower ContextFieldExistence expressions into lower-level expressions."""
def regular_visitor_fn(expression):
"""Expression visitor function that rewrites ContextFieldExistence expressions."""
if not isinstance(expression, ContextFieldExistence):
return expression # depends on [control=['if'], data=[]]
location_type = query_metadata_table.get_location_info(expression.location).type
# Since this function is only used in blocks that aren't ConstructResult,
# the location check is performed using a regular ContextField expression.
return BinaryComposition(u'!=', ContextField(expression.location, location_type), NullLiteral)
def construct_result_visitor_fn(expression):
"""Expression visitor function that rewrites ContextFieldExistence expressions."""
if not isinstance(expression, ContextFieldExistence):
return expression # depends on [control=['if'], data=[]]
location_type = query_metadata_table.get_location_info(expression.location).type
# Since this function is only used in ConstructResult blocks,
# the location check is performed using the special OutputContextVertex expression.
return BinaryComposition(u'!=', OutputContextVertex(expression.location, location_type), NullLiteral)
new_ir_blocks = []
for block in ir_blocks:
new_block = None
if isinstance(block, ConstructResult):
new_block = block.visit_and_update_expressions(construct_result_visitor_fn) # depends on [control=['if'], data=[]]
else:
new_block = block.visit_and_update_expressions(regular_visitor_fn)
new_ir_blocks.append(new_block) # depends on [control=['for'], data=['block']]
return new_ir_blocks
|
def spawn(mode, func, *args, **kwargs):
"""Spawns a thread-like object which runs the given function concurrently.
Available modes:
- `threading`
- `greenlet`
- `eventlet`
"""
if mode is None:
# 'threading' is the default mode.
mode = 'threading'
elif mode not in spawn.modes:
# validate the given mode.
raise ValueError('Invalid spawn mode: %s' % mode)
if mode == 'threading':
return spawn_thread(func, *args, **kwargs)
elif mode == 'gevent':
import gevent
import gevent.monkey
gevent.monkey.patch_select()
gevent.monkey.patch_socket()
return gevent.spawn(func, *args, **kwargs)
elif mode == 'eventlet':
import eventlet
eventlet.patcher.monkey_patch(select=True, socket=True)
return eventlet.spawn(func, *args, **kwargs)
assert False
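# --- Usage sketch (added for illustration, not part of the original source) ---
# Assumptions: `spawn_thread` wraps `threading.Thread` and returns the started
# thread, and `spawn.modes` is assigned after the definition, as the validation
# branch above expects.  The worker and the registry tuple below are hypothetical.
spawn.modes = ('threading', 'gevent', 'eventlet')

if __name__ == '__main__':
    import time

    def _worker(n):
        time.sleep(0.1)
        print('worker finished', n)

    handle = spawn(None, _worker, 3)   # mode=None falls back to 'threading'
    handle.join()                      # assumes a Thread-like object is returned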
|
def function[spawn, parameter[mode, func]]:
constant[Spawns a thread-like object which runs the given function concurrently.
Available modes:
- `threading`
    - `gevent`
- `eventlet`
]
if compare[name[mode] is constant[None]] begin[:]
variable[mode] assign[=] constant[threading]
if compare[name[mode] equal[==] constant[threading]] begin[:]
return[call[name[spawn_thread], parameter[name[func], <ast.Starred object at 0x7da1b11716f0>]]]
assert[constant[False]]
|
keyword[def] identifier[spawn] ( identifier[mode] , identifier[func] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[mode] keyword[is] keyword[None] :
identifier[mode] = literal[string]
keyword[elif] identifier[mode] keyword[not] keyword[in] identifier[spawn] . identifier[modes] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[mode] )
keyword[if] identifier[mode] == literal[string] :
keyword[return] identifier[spawn_thread] ( identifier[func] ,* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[mode] == literal[string] :
keyword[import] identifier[gevent]
keyword[import] identifier[gevent] . identifier[monkey]
identifier[gevent] . identifier[monkey] . identifier[patch_select] ()
identifier[gevent] . identifier[monkey] . identifier[patch_socket] ()
keyword[return] identifier[gevent] . identifier[spawn] ( identifier[func] ,* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[mode] == literal[string] :
keyword[import] identifier[eventlet]
identifier[eventlet] . identifier[patcher] . identifier[monkey_patch] ( identifier[select] = keyword[True] , identifier[socket] = keyword[True] )
keyword[return] identifier[eventlet] . identifier[spawn] ( identifier[func] ,* identifier[args] ,** identifier[kwargs] )
keyword[assert] keyword[False]
|
def spawn(mode, func, *args, **kwargs):
"""Spawns a thread-like object which runs the given function concurrently.
Available modes:
- `threading`
    - `gevent`
- `eventlet`
"""
if mode is None:
# 'threading' is the default mode.
mode = 'threading' # depends on [control=['if'], data=['mode']]
elif mode not in spawn.modes:
# validate the given mode.
raise ValueError('Invalid spawn mode: %s' % mode) # depends on [control=['if'], data=['mode']]
if mode == 'threading':
return spawn_thread(func, *args, **kwargs) # depends on [control=['if'], data=[]]
elif mode == 'gevent':
import gevent
import gevent.monkey
gevent.monkey.patch_select()
gevent.monkey.patch_socket()
return gevent.spawn(func, *args, **kwargs) # depends on [control=['if'], data=[]]
elif mode == 'eventlet':
import eventlet
eventlet.patcher.monkey_patch(select=True, socket=True)
return eventlet.spawn(func, *args, **kwargs) # depends on [control=['if'], data=[]]
assert False
|
def _check_response_for_errors(self, response):
"""Checks the response for errors and raises one if any exists."""
try:
doc = minidom.parseString(_string(response).replace("opensearch:", ""))
except Exception as e:
raise MalformedResponseError(self.network, e)
e = doc.getElementsByTagName("lfm")[0]
# logger.debug(doc.toprettyxml())
if e.getAttribute("status") != "ok":
e = doc.getElementsByTagName("error")[0]
status = e.getAttribute("code")
details = e.firstChild.data.strip()
raise WSError(self.network, status, details)
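# Standalone sketch of the parsing path above (the payload is an illustrative
# Last.fm-style error response, not taken from the original source):
if __name__ == '__main__':
    from xml.dom import minidom

    sample = '<lfm status="failed"><error code="6">Track not found</error></lfm>'
    doc = minidom.parseString(sample)
    if doc.getElementsByTagName('lfm')[0].getAttribute('status') != 'ok':
        err = doc.getElementsByTagName('error')[0]
        # the method above would raise WSError(network, '6', 'Track not found') here
        print(err.getAttribute('code'), err.firstChild.data.strip())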
|
def function[_check_response_for_errors, parameter[self, response]]:
constant[Checks the response for errors and raises one if any exists.]
<ast.Try object at 0x7da1b0b656c0>
variable[e] assign[=] call[call[name[doc].getElementsByTagName, parameter[constant[lfm]]]][constant[0]]
if compare[call[name[e].getAttribute, parameter[constant[status]]] not_equal[!=] constant[ok]] begin[:]
variable[e] assign[=] call[call[name[doc].getElementsByTagName, parameter[constant[error]]]][constant[0]]
variable[status] assign[=] call[name[e].getAttribute, parameter[constant[code]]]
variable[details] assign[=] call[name[e].firstChild.data.strip, parameter[]]
<ast.Raise object at 0x7da1b0b664a0>
|
keyword[def] identifier[_check_response_for_errors] ( identifier[self] , identifier[response] ):
literal[string]
keyword[try] :
identifier[doc] = identifier[minidom] . identifier[parseString] ( identifier[_string] ( identifier[response] ). identifier[replace] ( literal[string] , literal[string] ))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[MalformedResponseError] ( identifier[self] . identifier[network] , identifier[e] )
identifier[e] = identifier[doc] . identifier[getElementsByTagName] ( literal[string] )[ literal[int] ]
keyword[if] identifier[e] . identifier[getAttribute] ( literal[string] )!= literal[string] :
identifier[e] = identifier[doc] . identifier[getElementsByTagName] ( literal[string] )[ literal[int] ]
identifier[status] = identifier[e] . identifier[getAttribute] ( literal[string] )
identifier[details] = identifier[e] . identifier[firstChild] . identifier[data] . identifier[strip] ()
keyword[raise] identifier[WSError] ( identifier[self] . identifier[network] , identifier[status] , identifier[details] )
|
def _check_response_for_errors(self, response):
"""Checks the response for errors and raises one if any exists."""
try:
doc = minidom.parseString(_string(response).replace('opensearch:', '')) # depends on [control=['try'], data=[]]
except Exception as e:
raise MalformedResponseError(self.network, e) # depends on [control=['except'], data=['e']]
e = doc.getElementsByTagName('lfm')[0]
# logger.debug(doc.toprettyxml())
if e.getAttribute('status') != 'ok':
e = doc.getElementsByTagName('error')[0]
status = e.getAttribute('code')
details = e.firstChild.data.strip()
raise WSError(self.network, status, details) # depends on [control=['if'], data=[]]
|
def concordance_index_ipcw(survival_train, survival_test, estimate, tau=None, tied_tol=1e-8):
"""Concordance index for right-censored data based on inverse probability of censoring weights.
This is an alternative to the estimator in :func:`concordance_index_censored`
that does not depend on the distribution of censoring times in the test data.
Therefore, the estimate is unbiased and consistent for a population concordance
measure that is free of censoring.
It is based on inverse probability of censoring weights, thus requires
access to survival times from the training data to estimate the censoring
distribution. Note that this requires that survival times `survival_test`
lie within the range of survival times `survival_train`. This can be
achieved by specifying the truncation time `tau`.
The resulting `cindex` tells how well the given prediction model works in
predicting events that occur in the time range from 0 to `tau`.
The estimator uses the Kaplan-Meier estimator to estimate the
censoring survivor function. Therefore, it is restricted to
situations where the random censoring assumption holds and
censoring is independent of the features.
Parameters
----------
survival_train : structured array, shape = (n_train_samples,)
Survival times for training data to estimate the censoring
distribution from.
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
survival_test : structured array, shape = (n_samples,)
Survival times of test data.
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
estimate : array-like, shape = (n_samples,)
Estimated risk of experiencing an event of test data.
tau : float, optional
Truncation time. The survival function for the underlying
censoring time distribution :math:`D` needs to be positive
at `tau`, i.e., `tau` should be chosen such that the
probability of being censored after time `tau` is non-zero:
:math:`P(D > \\tau) > 0`. If `None`, no truncation is performed.
tied_tol : float, optional, default: 1e-8
The tolerance value for considering ties.
If the absolute difference between risk scores is smaller
        than or equal to `tied_tol`, risk scores are considered tied.
Returns
-------
cindex : float
Concordance index
concordant : int
Number of concordant pairs
discordant : int
Number of discordant pairs
tied_risk : int
Number of pairs having tied estimated risks
tied_time : int
Number of comparable pairs sharing the same time
References
----------
.. [1] Uno, H., Cai, T., Pencina, M. J., D’Agostino, R. B., & Wei, L. J. (2011).
"On the C-statistics for evaluating overall adequacy of risk prediction
procedures with censored survival data".
Statistics in Medicine, 30(10), 1105–1117.
"""
test_event, test_time = check_y_survival(survival_test)
if tau is not None:
survival_test = survival_test[test_time < tau]
estimate = check_array(estimate, ensure_2d=False)
check_consistent_length(test_event, test_time, estimate)
cens = CensoringDistributionEstimator()
cens.fit(survival_train)
ipcw = cens.predict_ipcw(survival_test)
w = numpy.square(ipcw)
return _estimate_concordance_index(test_event, test_time, estimate, w, tied_tol)
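# Usage sketch (assumes this is sksurv.metrics.concordance_index_ipcw from
# scikit-survival; the survival data below is made up for illustration only):
if __name__ == '__main__':
    import numpy
    dt = [('event', bool), ('time', float)]
    train = numpy.array([(True, 5.0), (False, 8.0), (True, 11.0), (False, 14.0),
                         (True, 6.0), (False, 13.0)], dtype=dt)
    test = numpy.array([(True, 4.0), (False, 7.0), (True, 10.0)], dtype=dt)
    risk = numpy.array([0.9, 0.3, 0.6])   # higher value = higher predicted risk
    # truncate at tau=12 so the test times stay inside the range of training times
    cindex, concordant, discordant, tied_risk, tied_time = concordance_index_ipcw(
        train, test, risk, tau=12.0)
    print(cindex)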
|
def function[concordance_index_ipcw, parameter[survival_train, survival_test, estimate, tau, tied_tol]]:
constant[Concordance index for right-censored data based on inverse probability of censoring weights.
This is an alternative to the estimator in :func:`concordance_index_censored`
that does not depend on the distribution of censoring times in the test data.
Therefore, the estimate is unbiased and consistent for a population concordance
measure that is free of censoring.
It is based on inverse probability of censoring weights, thus requires
access to survival times from the training data to estimate the censoring
distribution. Note that this requires that survival times `survival_test`
lie within the range of survival times `survival_train`. This can be
achieved by specifying the truncation time `tau`.
The resulting `cindex` tells how well the given prediction model works in
predicting events that occur in the time range from 0 to `tau`.
The estimator uses the Kaplan-Meier estimator to estimate the
censoring survivor function. Therefore, it is restricted to
situations where the random censoring assumption holds and
censoring is independent of the features.
Parameters
----------
survival_train : structured array, shape = (n_train_samples,)
Survival times for training data to estimate the censoring
distribution from.
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
survival_test : structured array, shape = (n_samples,)
Survival times of test data.
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
estimate : array-like, shape = (n_samples,)
Estimated risk of experiencing an event of test data.
tau : float, optional
Truncation time. The survival function for the underlying
censoring time distribution :math:`D` needs to be positive
at `tau`, i.e., `tau` should be chosen such that the
probability of being censored after time `tau` is non-zero:
:math:`P(D > \tau) > 0`. If `None`, no truncation is performed.
tied_tol : float, optional, default: 1e-8
The tolerance value for considering ties.
If the absolute difference between risk scores is smaller
        than or equal to `tied_tol`, risk scores are considered tied.
Returns
-------
cindex : float
Concordance index
concordant : int
Number of concordant pairs
discordant : int
Number of discordant pairs
tied_risk : int
Number of pairs having tied estimated risks
tied_time : int
Number of comparable pairs sharing the same time
References
----------
.. [1] Uno, H., Cai, T., Pencina, M. J., D’Agostino, R. B., & Wei, L. J. (2011).
"On the C-statistics for evaluating overall adequacy of risk prediction
procedures with censored survival data".
Statistics in Medicine, 30(10), 1105–1117.
]
<ast.Tuple object at 0x7da1b1880940> assign[=] call[name[check_y_survival], parameter[name[survival_test]]]
if compare[name[tau] is_not constant[None]] begin[:]
variable[survival_test] assign[=] call[name[survival_test]][compare[name[test_time] less[<] name[tau]]]
variable[estimate] assign[=] call[name[check_array], parameter[name[estimate]]]
call[name[check_consistent_length], parameter[name[test_event], name[test_time], name[estimate]]]
variable[cens] assign[=] call[name[CensoringDistributionEstimator], parameter[]]
call[name[cens].fit, parameter[name[survival_train]]]
variable[ipcw] assign[=] call[name[cens].predict_ipcw, parameter[name[survival_test]]]
variable[w] assign[=] call[name[numpy].square, parameter[name[ipcw]]]
return[call[name[_estimate_concordance_index], parameter[name[test_event], name[test_time], name[estimate], name[w], name[tied_tol]]]]
|
keyword[def] identifier[concordance_index_ipcw] ( identifier[survival_train] , identifier[survival_test] , identifier[estimate] , identifier[tau] = keyword[None] , identifier[tied_tol] = literal[int] ):
literal[string]
identifier[test_event] , identifier[test_time] = identifier[check_y_survival] ( identifier[survival_test] )
keyword[if] identifier[tau] keyword[is] keyword[not] keyword[None] :
identifier[survival_test] = identifier[survival_test] [ identifier[test_time] < identifier[tau] ]
identifier[estimate] = identifier[check_array] ( identifier[estimate] , identifier[ensure_2d] = keyword[False] )
identifier[check_consistent_length] ( identifier[test_event] , identifier[test_time] , identifier[estimate] )
identifier[cens] = identifier[CensoringDistributionEstimator] ()
identifier[cens] . identifier[fit] ( identifier[survival_train] )
identifier[ipcw] = identifier[cens] . identifier[predict_ipcw] ( identifier[survival_test] )
identifier[w] = identifier[numpy] . identifier[square] ( identifier[ipcw] )
keyword[return] identifier[_estimate_concordance_index] ( identifier[test_event] , identifier[test_time] , identifier[estimate] , identifier[w] , identifier[tied_tol] )
|
def concordance_index_ipcw(survival_train, survival_test, estimate, tau=None, tied_tol=1e-08):
"""Concordance index for right-censored data based on inverse probability of censoring weights.
This is an alternative to the estimator in :func:`concordance_index_censored`
that does not depend on the distribution of censoring times in the test data.
Therefore, the estimate is unbiased and consistent for a population concordance
measure that is free of censoring.
It is based on inverse probability of censoring weights, thus requires
access to survival times from the training data to estimate the censoring
distribution. Note that this requires that survival times `survival_test`
lie within the range of survival times `survival_train`. This can be
achieved by specifying the truncation time `tau`.
The resulting `cindex` tells how well the given prediction model works in
predicting events that occur in the time range from 0 to `tau`.
The estimator uses the Kaplan-Meier estimator to estimate the
censoring survivor function. Therefore, it is restricted to
situations where the random censoring assumption holds and
censoring is independent of the features.
Parameters
----------
survival_train : structured array, shape = (n_train_samples,)
Survival times for training data to estimate the censoring
distribution from.
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
survival_test : structured array, shape = (n_samples,)
Survival times of test data.
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
estimate : array-like, shape = (n_samples,)
Estimated risk of experiencing an event of test data.
tau : float, optional
Truncation time. The survival function for the underlying
censoring time distribution :math:`D` needs to be positive
at `tau`, i.e., `tau` should be chosen such that the
probability of being censored after time `tau` is non-zero:
:math:`P(D > \\tau) > 0`. If `None`, no truncation is performed.
tied_tol : float, optional, default: 1e-8
The tolerance value for considering ties.
If the absolute difference between risk scores is smaller
        than or equal to `tied_tol`, risk scores are considered tied.
Returns
-------
cindex : float
Concordance index
concordant : int
Number of concordant pairs
discordant : int
Number of discordant pairs
tied_risk : int
Number of pairs having tied estimated risks
tied_time : int
Number of comparable pairs sharing the same time
References
----------
.. [1] Uno, H., Cai, T., Pencina, M. J., D’Agostino, R. B., & Wei, L. J. (2011).
"On the C-statistics for evaluating overall adequacy of risk prediction
procedures with censored survival data".
Statistics in Medicine, 30(10), 1105–1117.
"""
(test_event, test_time) = check_y_survival(survival_test)
if tau is not None:
survival_test = survival_test[test_time < tau] # depends on [control=['if'], data=['tau']]
estimate = check_array(estimate, ensure_2d=False)
check_consistent_length(test_event, test_time, estimate)
cens = CensoringDistributionEstimator()
cens.fit(survival_train)
ipcw = cens.predict_ipcw(survival_test)
w = numpy.square(ipcw)
return _estimate_concordance_index(test_event, test_time, estimate, w, tied_tol)
|
def _write_config(self, cfg, slot):
""" Write configuration to YubiKey. """
old_pgm_seq = self._status.pgm_seq
frame = cfg.to_frame(slot=slot)
self._debug("Writing %s frame :\n%s\n" % \
(yubikey_config.command2str(frame.command), cfg))
self._write(frame)
self._waitfor_clear(yubikey_defs.SLOT_WRITE_FLAG)
# make sure we have a fresh pgm_seq value
self.status()
self._debug("Programmed slot %i, sequence %i -> %i\n" % (slot, old_pgm_seq, self._status.pgm_seq))
cfgs = self._status.valid_configs()
if not cfgs and self._status.pgm_seq == 0:
return
if self._status.pgm_seq == old_pgm_seq + 1:
return
raise YubiKeyUSBHIDError('YubiKey programming failed (seq %i not increased (%i))' % \
(old_pgm_seq, self._status.pgm_seq))
|
def function[_write_config, parameter[self, cfg, slot]]:
constant[ Write configuration to YubiKey. ]
variable[old_pgm_seq] assign[=] name[self]._status.pgm_seq
variable[frame] assign[=] call[name[cfg].to_frame, parameter[]]
call[name[self]._debug, parameter[binary_operation[constant[Writing %s frame :
%s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b08d99c0>, <ast.Name object at 0x7da2047e8070>]]]]]
call[name[self]._write, parameter[name[frame]]]
call[name[self]._waitfor_clear, parameter[name[yubikey_defs].SLOT_WRITE_FLAG]]
call[name[self].status, parameter[]]
call[name[self]._debug, parameter[binary_operation[constant[Programmed slot %i, sequence %i -> %i
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b08454b0>, <ast.Name object at 0x7da1b0846d10>, <ast.Attribute object at 0x7da1b0844cd0>]]]]]
variable[cfgs] assign[=] call[name[self]._status.valid_configs, parameter[]]
if <ast.BoolOp object at 0x7da1b08474c0> begin[:]
return[None]
if compare[name[self]._status.pgm_seq equal[==] binary_operation[name[old_pgm_seq] + constant[1]]] begin[:]
return[None]
<ast.Raise object at 0x7da1b0844c70>
|
keyword[def] identifier[_write_config] ( identifier[self] , identifier[cfg] , identifier[slot] ):
literal[string]
identifier[old_pgm_seq] = identifier[self] . identifier[_status] . identifier[pgm_seq]
identifier[frame] = identifier[cfg] . identifier[to_frame] ( identifier[slot] = identifier[slot] )
identifier[self] . identifier[_debug] ( literal[string] %( identifier[yubikey_config] . identifier[command2str] ( identifier[frame] . identifier[command] ), identifier[cfg] ))
identifier[self] . identifier[_write] ( identifier[frame] )
identifier[self] . identifier[_waitfor_clear] ( identifier[yubikey_defs] . identifier[SLOT_WRITE_FLAG] )
identifier[self] . identifier[status] ()
identifier[self] . identifier[_debug] ( literal[string] %( identifier[slot] , identifier[old_pgm_seq] , identifier[self] . identifier[_status] . identifier[pgm_seq] ))
identifier[cfgs] = identifier[self] . identifier[_status] . identifier[valid_configs] ()
keyword[if] keyword[not] identifier[cfgs] keyword[and] identifier[self] . identifier[_status] . identifier[pgm_seq] == literal[int] :
keyword[return]
keyword[if] identifier[self] . identifier[_status] . identifier[pgm_seq] == identifier[old_pgm_seq] + literal[int] :
keyword[return]
keyword[raise] identifier[YubiKeyUSBHIDError] ( literal[string] %( identifier[old_pgm_seq] , identifier[self] . identifier[_status] . identifier[pgm_seq] ))
|
def _write_config(self, cfg, slot):
""" Write configuration to YubiKey. """
old_pgm_seq = self._status.pgm_seq
frame = cfg.to_frame(slot=slot)
self._debug('Writing %s frame :\n%s\n' % (yubikey_config.command2str(frame.command), cfg))
self._write(frame)
self._waitfor_clear(yubikey_defs.SLOT_WRITE_FLAG)
# make sure we have a fresh pgm_seq value
self.status()
self._debug('Programmed slot %i, sequence %i -> %i\n' % (slot, old_pgm_seq, self._status.pgm_seq))
cfgs = self._status.valid_configs()
if not cfgs and self._status.pgm_seq == 0:
return # depends on [control=['if'], data=[]]
if self._status.pgm_seq == old_pgm_seq + 1:
return # depends on [control=['if'], data=[]]
raise YubiKeyUSBHIDError('YubiKey programming failed (seq %i not increased (%i))' % (old_pgm_seq, self._status.pgm_seq))
|
def _default(self, obj: object):
""" Return a serializable version of obj. Overrides JsonObj _default method
:param obj: Object to be serialized
:return: Serialized version of obj
"""
return None if obj is JSGNull else obj.val if type(obj) is AnyType else \
JSGObject._strip_nones(obj.__dict__) if isinstance(obj, JsonObj) \
else cast(JSGString, obj).val if issubclass(type(obj), JSGString) else str(obj)
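# The chained conditional above, written out long-hand for readability (added
# sketch; JSGNull, AnyType, JsonObj, JSGObject, JSGString and cast come from the
# surrounding pyjsg module, exactly as in the method above, so this is not
# self-contained outside that module):
def _default_expanded(self, obj: object):
    if obj is JSGNull:
        return None
    if type(obj) is AnyType:
        return obj.val
    if isinstance(obj, JsonObj):
        return JSGObject._strip_nones(obj.__dict__)
    if issubclass(type(obj), JSGString):
        return cast(JSGString, obj).val
    return str(obj)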
|
def function[_default, parameter[self, obj]]:
constant[ Return a serializable version of obj. Overrides JsonObj _default method
:param obj: Object to be serialized
:return: Serialized version of obj
]
return[<ast.IfExp object at 0x7da20c9915d0>]
|
keyword[def] identifier[_default] ( identifier[self] , identifier[obj] : identifier[object] ):
literal[string]
keyword[return] keyword[None] keyword[if] identifier[obj] keyword[is] identifier[JSGNull] keyword[else] identifier[obj] . identifier[val] keyword[if] identifier[type] ( identifier[obj] ) keyword[is] identifier[AnyType] keyword[else] identifier[JSGObject] . identifier[_strip_nones] ( identifier[obj] . identifier[__dict__] ) keyword[if] identifier[isinstance] ( identifier[obj] , identifier[JsonObj] ) keyword[else] identifier[cast] ( identifier[JSGString] , identifier[obj] ). identifier[val] keyword[if] identifier[issubclass] ( identifier[type] ( identifier[obj] ), identifier[JSGString] ) keyword[else] identifier[str] ( identifier[obj] )
|
def _default(self, obj: object):
""" Return a serializable version of obj. Overrides JsonObj _default method
:param obj: Object to be serialized
:return: Serialized version of obj
"""
return None if obj is JSGNull else obj.val if type(obj) is AnyType else JSGObject._strip_nones(obj.__dict__) if isinstance(obj, JsonObj) else cast(JSGString, obj).val if issubclass(type(obj), JSGString) else str(obj)
|
def create_volume(zone_name, size=None, snapshot_id=None, volume_type=None,
iops=None, encrypted=False, kms_key_id=None, wait_for_creation=False,
region=None, key=None, keyid=None, profile=None):
'''
    Create an EBS volume in an availability zone.
..
zone_name
(string) – The Availability zone name of the EBS volume to be created.
size
(int) – The size of the new volume, in GiB. If you're creating the
volume from a snapshot and don't specify a volume size, the
default is the snapshot size.
snapshot_id
(string) – The snapshot ID from which the new volume will be created.
volume_type
(string) - The type of the volume. Valid volume types for AWS can be found here:
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
iops
(int) - The provisioned IOPS you want to associate with this volume.
encrypted
(bool) - Specifies whether the volume should be encrypted.
kms_key_id
(string) - If encrypted is True, this KMS Key ID may be specified to
encrypt volume with this key
e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef
wait_for_creation
(bool) - Whether or not to wait for volume creation to complete.
returns
(string) - created volume id on success, error message on failure.
CLI Example:
.. code-block:: bash
salt-call boto_ec2.create_volume us-east-1a size=10
salt-call boto_ec2.create_volume us-east-1a snapshot_id=snap-0123abcd
'''
if size is None and snapshot_id is None:
raise SaltInvocationError(
'Size must be provided if not created from snapshot.'
)
ret = {}
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
vol = conn.create_volume(size=size, zone=zone_name, snapshot=snapshot_id,
volume_type=volume_type, iops=iops, encrypted=encrypted,
kms_key_id=kms_key_id)
if wait_for_creation and not _wait_for_volume_available(conn, vol.id):
timeout_msg = 'Timed out waiting for the volume status "available".'
log.error(timeout_msg)
ret['error'] = timeout_msg
else:
ret['result'] = vol.id
except boto.exception.BotoServerError as error:
ret['error'] = __utils__['boto.get_error'](error)
return ret
|
def function[create_volume, parameter[zone_name, size, snapshot_id, volume_type, iops, encrypted, kms_key_id, wait_for_creation, region, key, keyid, profile]]:
constant[
    Create an EBS volume in an availability zone.
..
zone_name
(string) – The Availability zone name of the EBS volume to be created.
size
(int) – The size of the new volume, in GiB. If you're creating the
volume from a snapshot and don't specify a volume size, the
default is the snapshot size.
snapshot_id
(string) – The snapshot ID from which the new volume will be created.
volume_type
(string) - The type of the volume. Valid volume types for AWS can be found here:
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
iops
(int) - The provisioned IOPS you want to associate with this volume.
encrypted
(bool) - Specifies whether the volume should be encrypted.
kms_key_id
(string) - If encrypted is True, this KMS Key ID may be specified to
encrypt volume with this key
e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef
wait_for_creation
(bool) - Whether or not to wait for volume creation to complete.
returns
(string) - created volume id on success, error message on failure.
CLI Example:
.. code-block:: bash
salt-call boto_ec2.create_volume us-east-1a size=10
salt-call boto_ec2.create_volume us-east-1a snapshot_id=snap-0123abcd
]
if <ast.BoolOp object at 0x7da18f00fc70> begin[:]
<ast.Raise object at 0x7da18f00d300>
variable[ret] assign[=] dictionary[[], []]
variable[conn] assign[=] call[name[_get_conn], parameter[]]
<ast.Try object at 0x7da18f00c490>
return[name[ret]]
|
keyword[def] identifier[create_volume] ( identifier[zone_name] , identifier[size] = keyword[None] , identifier[snapshot_id] = keyword[None] , identifier[volume_type] = keyword[None] ,
identifier[iops] = keyword[None] , identifier[encrypted] = keyword[False] , identifier[kms_key_id] = keyword[None] , identifier[wait_for_creation] = keyword[False] ,
identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ):
literal[string]
keyword[if] identifier[size] keyword[is] keyword[None] keyword[and] identifier[snapshot_id] keyword[is] keyword[None] :
keyword[raise] identifier[SaltInvocationError] (
literal[string]
)
identifier[ret] ={}
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[try] :
identifier[vol] = identifier[conn] . identifier[create_volume] ( identifier[size] = identifier[size] , identifier[zone] = identifier[zone_name] , identifier[snapshot] = identifier[snapshot_id] ,
identifier[volume_type] = identifier[volume_type] , identifier[iops] = identifier[iops] , identifier[encrypted] = identifier[encrypted] ,
identifier[kms_key_id] = identifier[kms_key_id] )
keyword[if] identifier[wait_for_creation] keyword[and] keyword[not] identifier[_wait_for_volume_available] ( identifier[conn] , identifier[vol] . identifier[id] ):
identifier[timeout_msg] = literal[string]
identifier[log] . identifier[error] ( identifier[timeout_msg] )
identifier[ret] [ literal[string] ]= identifier[timeout_msg]
keyword[else] :
identifier[ret] [ literal[string] ]= identifier[vol] . identifier[id]
keyword[except] identifier[boto] . identifier[exception] . identifier[BotoServerError] keyword[as] identifier[error] :
identifier[ret] [ literal[string] ]= identifier[__utils__] [ literal[string] ]( identifier[error] )
keyword[return] identifier[ret]
|
def create_volume(zone_name, size=None, snapshot_id=None, volume_type=None, iops=None, encrypted=False, kms_key_id=None, wait_for_creation=False, region=None, key=None, keyid=None, profile=None):
"""
    Create an EBS volume in an availability zone.
..
zone_name
(string) – The Availability zone name of the EBS volume to be created.
size
(int) – The size of the new volume, in GiB. If you're creating the
volume from a snapshot and don't specify a volume size, the
default is the snapshot size.
snapshot_id
(string) – The snapshot ID from which the new volume will be created.
volume_type
(string) - The type of the volume. Valid volume types for AWS can be found here:
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
iops
(int) - The provisioned IOPS you want to associate with this volume.
encrypted
(bool) - Specifies whether the volume should be encrypted.
kms_key_id
(string) - If encrypted is True, this KMS Key ID may be specified to
encrypt volume with this key
e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef
wait_for_creation
(bool) - Whether or not to wait for volume creation to complete.
returns
(string) - created volume id on success, error message on failure.
CLI Example:
.. code-block:: bash
salt-call boto_ec2.create_volume us-east-1a size=10
salt-call boto_ec2.create_volume us-east-1a snapshot_id=snap-0123abcd
"""
if size is None and snapshot_id is None:
raise SaltInvocationError('Size must be provided if not created from snapshot.') # depends on [control=['if'], data=[]]
ret = {}
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
vol = conn.create_volume(size=size, zone=zone_name, snapshot=snapshot_id, volume_type=volume_type, iops=iops, encrypted=encrypted, kms_key_id=kms_key_id)
if wait_for_creation and (not _wait_for_volume_available(conn, vol.id)):
timeout_msg = 'Timed out waiting for the volume status "available".'
log.error(timeout_msg)
ret['error'] = timeout_msg # depends on [control=['if'], data=[]]
else:
ret['result'] = vol.id # depends on [control=['try'], data=[]]
except boto.exception.BotoServerError as error:
ret['error'] = __utils__['boto.get_error'](error) # depends on [control=['except'], data=['error']]
return ret
|
def imag(self):
"""Imaginary part of ``self``.
Returns
-------
imag : `NumpyTensor`
        Imaginary part of this element as an element of a
`NumpyTensorSpace` with real data type.
Examples
--------
Get the imaginary part:
>>> space = odl.cn(3)
>>> x = space.element([1 + 1j, 2, 3 - 3j])
>>> x.imag
rn(3).element([ 1., 0., -3.])
Set the imaginary part:
>>> space = odl.cn(3)
>>> x = space.element([1 + 1j, 2, 3 - 3j])
>>> zero = odl.rn(3).zero()
>>> x.imag = zero
>>> x
cn(3).element([ 1.+0.j, 2.+0.j, 3.+0.j])
Other array-like types and broadcasting:
>>> x.imag = 1.0
>>> x
cn(3).element([ 1.+1.j, 2.+1.j, 3.+1.j])
>>> x.imag = [2, 3, 4]
>>> x
cn(3).element([ 1.+2.j, 2.+3.j, 3.+4.j])
"""
if self.space.is_real:
return self.space.zero()
elif self.space.is_complex:
real_space = self.space.astype(self.space.real_dtype)
return real_space.element(self.data.imag)
else:
raise NotImplementedError('`imag` not defined for non-numeric '
'dtype {}'.format(self.dtype))
|
def function[imag, parameter[self]]:
constant[Imaginary part of ``self``.
Returns
-------
imag : `NumpyTensor`
        Imaginary part of this element as an element of a
`NumpyTensorSpace` with real data type.
Examples
--------
Get the imaginary part:
>>> space = odl.cn(3)
>>> x = space.element([1 + 1j, 2, 3 - 3j])
>>> x.imag
rn(3).element([ 1., 0., -3.])
Set the imaginary part:
>>> space = odl.cn(3)
>>> x = space.element([1 + 1j, 2, 3 - 3j])
>>> zero = odl.rn(3).zero()
>>> x.imag = zero
>>> x
cn(3).element([ 1.+0.j, 2.+0.j, 3.+0.j])
Other array-like types and broadcasting:
>>> x.imag = 1.0
>>> x
cn(3).element([ 1.+1.j, 2.+1.j, 3.+1.j])
>>> x.imag = [2, 3, 4]
>>> x
cn(3).element([ 1.+2.j, 2.+3.j, 3.+4.j])
]
if name[self].space.is_real begin[:]
return[call[name[self].space.zero, parameter[]]]
|
keyword[def] identifier[imag] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[space] . identifier[is_real] :
keyword[return] identifier[self] . identifier[space] . identifier[zero] ()
keyword[elif] identifier[self] . identifier[space] . identifier[is_complex] :
identifier[real_space] = identifier[self] . identifier[space] . identifier[astype] ( identifier[self] . identifier[space] . identifier[real_dtype] )
keyword[return] identifier[real_space] . identifier[element] ( identifier[self] . identifier[data] . identifier[imag] )
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[dtype] ))
|
def imag(self):
"""Imaginary part of ``self``.
Returns
-------
imag : `NumpyTensor`
        Imaginary part of this element as an element of a
`NumpyTensorSpace` with real data type.
Examples
--------
Get the imaginary part:
>>> space = odl.cn(3)
>>> x = space.element([1 + 1j, 2, 3 - 3j])
>>> x.imag
rn(3).element([ 1., 0., -3.])
Set the imaginary part:
>>> space = odl.cn(3)
>>> x = space.element([1 + 1j, 2, 3 - 3j])
>>> zero = odl.rn(3).zero()
>>> x.imag = zero
>>> x
cn(3).element([ 1.+0.j, 2.+0.j, 3.+0.j])
Other array-like types and broadcasting:
>>> x.imag = 1.0
>>> x
cn(3).element([ 1.+1.j, 2.+1.j, 3.+1.j])
>>> x.imag = [2, 3, 4]
>>> x
cn(3).element([ 1.+2.j, 2.+3.j, 3.+4.j])
"""
if self.space.is_real:
return self.space.zero() # depends on [control=['if'], data=[]]
elif self.space.is_complex:
real_space = self.space.astype(self.space.real_dtype)
return real_space.element(self.data.imag) # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('`imag` not defined for non-numeric dtype {}'.format(self.dtype))
|
def list_command_pydb(self, arg):
"""List command to use if we have a newer pydb installed"""
filename, first, last = OldPdb.parse_list_cmd(self, arg)
if filename is not None:
self.print_list_lines(filename, first, last)
|
def function[list_command_pydb, parameter[self, arg]]:
constant[List command to use if we have a newer pydb installed]
<ast.Tuple object at 0x7da204346bc0> assign[=] call[name[OldPdb].parse_list_cmd, parameter[name[self], name[arg]]]
if compare[name[filename] is_not constant[None]] begin[:]
call[name[self].print_list_lines, parameter[name[filename], name[first], name[last]]]
|
keyword[def] identifier[list_command_pydb] ( identifier[self] , identifier[arg] ):
literal[string]
identifier[filename] , identifier[first] , identifier[last] = identifier[OldPdb] . identifier[parse_list_cmd] ( identifier[self] , identifier[arg] )
keyword[if] identifier[filename] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[print_list_lines] ( identifier[filename] , identifier[first] , identifier[last] )
|
def list_command_pydb(self, arg):
"""List command to use if we have a newer pydb installed"""
(filename, first, last) = OldPdb.parse_list_cmd(self, arg)
if filename is not None:
self.print_list_lines(filename, first, last) # depends on [control=['if'], data=['filename']]
|
def _validate_time_range(trange, status, msg):
'''
Check time range
'''
# If trange is empty, just return the current status & msg
if not trange:
return status, msg
if not isinstance(trange, dict):
status = False
msg = ('The time_range parameter for '
'wtmp beacon must '
'be a dictionary.')
if not all(k in trange for k in ('start', 'end')):
status = False
msg = ('The time_range parameter for '
'wtmp beacon must contain '
'start & end options.')
return status, msg
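# Quick behavioural sketch (illustrative values, not from the original source):
if __name__ == '__main__':
    print(_validate_time_range({'start': '8am', 'end': '5pm'}, True, None))
    # -> (True, None)
    print(_validate_time_range({'start': '8am'}, True, None))
    # -> (False, 'The time_range parameter for wtmp beacon must contain start & end options.')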
|
def function[_validate_time_range, parameter[trange, status, msg]]:
constant[
Check time range
]
if <ast.UnaryOp object at 0x7da1b217d960> begin[:]
return[tuple[[<ast.Name object at 0x7da1b217dc00>, <ast.Name object at 0x7da1b217c490>]]]
if <ast.UnaryOp object at 0x7da1b217cc40> begin[:]
variable[status] assign[=] constant[False]
variable[msg] assign[=] constant[The time_range parameter for wtmp beacon must be a dictionary.]
if <ast.UnaryOp object at 0x7da1b217c7c0> begin[:]
variable[status] assign[=] constant[False]
variable[msg] assign[=] constant[The time_range parameter for wtmp beacon must contain start & end options.]
return[tuple[[<ast.Name object at 0x7da1b1ff01f0>, <ast.Name object at 0x7da1b1ff0c40>]]]
|
keyword[def] identifier[_validate_time_range] ( identifier[trange] , identifier[status] , identifier[msg] ):
literal[string]
keyword[if] keyword[not] identifier[trange] :
keyword[return] identifier[status] , identifier[msg]
keyword[if] keyword[not] identifier[isinstance] ( identifier[trange] , identifier[dict] ):
identifier[status] = keyword[False]
identifier[msg] =( literal[string]
literal[string]
literal[string] )
keyword[if] keyword[not] identifier[all] ( identifier[k] keyword[in] identifier[trange] keyword[for] identifier[k] keyword[in] ( literal[string] , literal[string] )):
identifier[status] = keyword[False]
identifier[msg] =( literal[string]
literal[string]
literal[string] )
keyword[return] identifier[status] , identifier[msg]
|
def _validate_time_range(trange, status, msg):
"""
Check time range
"""
# If trange is empty, just return the current status & msg
if not trange:
return (status, msg) # depends on [control=['if'], data=[]]
if not isinstance(trange, dict):
status = False
msg = 'The time_range parameter for wtmp beacon must be a dictionary.' # depends on [control=['if'], data=[]]
if not all((k in trange for k in ('start', 'end'))):
status = False
msg = 'The time_range parameter for wtmp beacon must contain start & end options.' # depends on [control=['if'], data=[]]
return (status, msg)
|
def lookup_blob(hash_value):
"""
    Return the content stored for the ``BlobStorage`` row whose sha256 digest
    matches ``hash_value``. If no such blob exists (or the lookup fails), the
    string "Blob not found" is returned instead.
"""
try:
blob = BlobStorage.objects.get(sha256=hash_value)
except:
return "Blob not found"
return blob.content
|
def function[lookup_blob, parameter[hash_value]]:
constant[
    Return the content stored for the ``BlobStorage`` row whose sha256 digest
    matches ``hash_value``. If no such blob exists (or the lookup fails), the
    string "Blob not found" is returned instead.
]
<ast.Try object at 0x7da1b16d71c0>
return[name[blob].content]
|
keyword[def] identifier[lookup_blob] ( identifier[hash_value] ):
literal[string]
keyword[try] :
identifier[blob] = identifier[BlobStorage] . identifier[objects] . identifier[get] ( identifier[sha256] = identifier[hash_value] )
keyword[except] :
keyword[return] literal[string]
keyword[return] identifier[blob] . identifier[content]
|
def lookup_blob(hash_value):
"""
    Return the content stored for the ``BlobStorage`` row whose sha256 digest
    matches ``hash_value``. If no such blob exists (or the lookup fails), the
    string "Blob not found" is returned instead.
"""
try:
blob = BlobStorage.objects.get(sha256=hash_value) # depends on [control=['try'], data=[]]
except:
return 'Blob not found' # depends on [control=['except'], data=[]]
return blob.content
|
def data_uri(thumbnail):
"""
This filter will return the base64 encoded data URI for a given thumbnail object.
Example usage::
{% thumbnail sample_image 25x25 crop as thumb %}
<img src="{{ thumb|data_uri }}">
will for instance be rendered as:
<img src="data:image/png;base64,iVBORw0KGgo...">
"""
try:
thumbnail.open('rb')
data = thumbnail.read()
finally:
thumbnail.close()
mime_type = mimetypes.guess_type(str(thumbnail.file))[0] or 'application/octet-stream'
data = b64encode(data).decode('utf-8')
return 'data:{0};base64,{1}'.format(mime_type, data)
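# Registration sketch (assumption: this filter lives in a Django app's
# templatetags module, so it is wired up roughly like this):
from django import template

register = template.Library()
register.filter('data_uri', data_uri)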
|
def function[data_uri, parameter[thumbnail]]:
constant[
This filter will return the base64 encoded data URI for a given thumbnail object.
Example usage::
{% thumbnail sample_image 25x25 crop as thumb %}
<img src="{{ thumb|data_uri }}">
will for instance be rendered as:
<img src="data:image/png;base64,iVBORw0KGgo...">
]
<ast.Try object at 0x7da18bcc96f0>
variable[mime_type] assign[=] <ast.BoolOp object at 0x7da1b12fdcf0>
variable[data] assign[=] call[call[name[b64encode], parameter[name[data]]].decode, parameter[constant[utf-8]]]
return[call[constant[data:{0};base64,{1}].format, parameter[name[mime_type], name[data]]]]
|
keyword[def] identifier[data_uri] ( identifier[thumbnail] ):
literal[string]
keyword[try] :
identifier[thumbnail] . identifier[open] ( literal[string] )
identifier[data] = identifier[thumbnail] . identifier[read] ()
keyword[finally] :
identifier[thumbnail] . identifier[close] ()
identifier[mime_type] = identifier[mimetypes] . identifier[guess_type] ( identifier[str] ( identifier[thumbnail] . identifier[file] ))[ literal[int] ] keyword[or] literal[string]
identifier[data] = identifier[b64encode] ( identifier[data] ). identifier[decode] ( literal[string] )
keyword[return] literal[string] . identifier[format] ( identifier[mime_type] , identifier[data] )
|
def data_uri(thumbnail):
"""
This filter will return the base64 encoded data URI for a given thumbnail object.
Example usage::
{% thumbnail sample_image 25x25 crop as thumb %}
<img src="{{ thumb|data_uri }}">
will for instance be rendered as:
<img src="data:image/png;base64,iVBORw0KGgo...">
"""
try:
thumbnail.open('rb')
data = thumbnail.read() # depends on [control=['try'], data=[]]
finally:
thumbnail.close()
mime_type = mimetypes.guess_type(str(thumbnail.file))[0] or 'application/octet-stream'
data = b64encode(data).decode('utf-8')
return 'data:{0};base64,{1}'.format(mime_type, data)
|
def get_offset_range(self, row_offset, column_offset):
"""
Gets an object which represents a range that's offset from the specified range.
The dimension of the returned range will match this range.
If the resulting range is forced outside the bounds of the worksheet grid,
an exception will be thrown.
:param int row_offset: The number of rows (positive, negative, or 0)
by which the range is to be offset.
    :param int column_offset: The number of columns (positive, negative, or 0)
by which the range is to be offset.
:return: Range
"""
return self._get_range('offset_range', rowOffset=row_offset, columnOffset=column_offset)
|
def function[get_offset_range, parameter[self, row_offset, column_offset]]:
constant[
Gets an object which represents a range that's offset from the specified range.
The dimension of the returned range will match this range.
If the resulting range is forced outside the bounds of the worksheet grid,
an exception will be thrown.
:param int row_offset: The number of rows (positive, negative, or 0)
by which the range is to be offset.
    :param int column_offset: The number of columns (positive, negative, or 0)
by which the range is to be offset.
:return: Range
]
return[call[name[self]._get_range, parameter[constant[offset_range]]]]
|
keyword[def] identifier[get_offset_range] ( identifier[self] , identifier[row_offset] , identifier[column_offset] ):
literal[string]
keyword[return] identifier[self] . identifier[_get_range] ( literal[string] , identifier[rowOffset] = identifier[row_offset] , identifier[columnOffset] = identifier[column_offset] )
|
def get_offset_range(self, row_offset, column_offset):
"""
Gets an object which represents a range that's offset from the specified range.
The dimension of the returned range will match this range.
If the resulting range is forced outside the bounds of the worksheet grid,
an exception will be thrown.
:param int row_offset: The number of rows (positive, negative, or 0)
by which the range is to be offset.
    :param int column_offset: The number of columns (positive, negative, or 0)
by which the range is to be offset.
:return: Range
"""
return self._get_range('offset_range', rowOffset=row_offset, columnOffset=column_offset)
|
def inverse(self):
"""Return the (left) inverse.
If the domain is a real space, this is not a true inverse,
only a (left) inverse.
Examples
--------
>>> r3 = odl.rn(3)
>>> op = ComplexEmbedding(r3, scalar=1)
>>> op.inverse(op([1, 2, 4]))
rn(3).element([ 1., 2., 4.])
"""
if self.domain.is_real:
# Real domain
# Optimizations for simple cases.
if self.scalar.real == self.scalar:
return (1 / self.scalar.real) * RealPart(self.range)
elif 1j * self.scalar.imag == self.scalar:
return (1 / self.scalar.imag) * ImagPart(self.range)
else:
# General case
inv_scalar = (1 / self.scalar).conjugate()
return ((inv_scalar.real) * RealPart(self.range) +
(inv_scalar.imag) * ImagPart(self.range))
else:
# Complex domain
return ComplexEmbedding(self.range, self.scalar.conjugate())
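# Why the real-domain branch is a left inverse (added sketch, not from the
# original source).  The embedding sends a real x to y = s * x with
# s = self.scalar, so Re(y) = Re(s) * x and Im(y) = Im(s) * x.  With
# t = (1 / s).conjugate() = s / abs(s) ** 2, the general case above returns
#     Re(t) * Re(y) + Im(t) * Im(y)
#         = (Re(s) ** 2 + Im(s) ** 2) / abs(s) ** 2 * x
#         = x,
# and the two optimized branches are this formula with Im(s) = 0 or Re(s) = 0.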
|
def function[inverse, parameter[self]]:
constant[Return the (left) inverse.
If the domain is a real space, this is not a true inverse,
only a (left) inverse.
Examples
--------
>>> r3 = odl.rn(3)
>>> op = ComplexEmbedding(r3, scalar=1)
>>> op.inverse(op([1, 2, 4]))
rn(3).element([ 1., 2., 4.])
]
if name[self].domain.is_real begin[:]
if compare[name[self].scalar.real equal[==] name[self].scalar] begin[:]
return[binary_operation[binary_operation[constant[1] / name[self].scalar.real] * call[name[RealPart], parameter[name[self].range]]]]
|
keyword[def] identifier[inverse] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[domain] . identifier[is_real] :
keyword[if] identifier[self] . identifier[scalar] . identifier[real] == identifier[self] . identifier[scalar] :
keyword[return] ( literal[int] / identifier[self] . identifier[scalar] . identifier[real] )* identifier[RealPart] ( identifier[self] . identifier[range] )
keyword[elif] literal[int] * identifier[self] . identifier[scalar] . identifier[imag] == identifier[self] . identifier[scalar] :
keyword[return] ( literal[int] / identifier[self] . identifier[scalar] . identifier[imag] )* identifier[ImagPart] ( identifier[self] . identifier[range] )
keyword[else] :
identifier[inv_scalar] =( literal[int] / identifier[self] . identifier[scalar] ). identifier[conjugate] ()
keyword[return] (( identifier[inv_scalar] . identifier[real] )* identifier[RealPart] ( identifier[self] . identifier[range] )+
( identifier[inv_scalar] . identifier[imag] )* identifier[ImagPart] ( identifier[self] . identifier[range] ))
keyword[else] :
keyword[return] identifier[ComplexEmbedding] ( identifier[self] . identifier[range] , identifier[self] . identifier[scalar] . identifier[conjugate] ())
|
def inverse(self):
"""Return the (left) inverse.
If the domain is a real space, this is not a true inverse,
only a (left) inverse.
Examples
--------
>>> r3 = odl.rn(3)
>>> op = ComplexEmbedding(r3, scalar=1)
>>> op.inverse(op([1, 2, 4]))
rn(3).element([ 1., 2., 4.])
"""
if self.domain.is_real:
# Real domain
# Optimizations for simple cases.
if self.scalar.real == self.scalar:
return 1 / self.scalar.real * RealPart(self.range) # depends on [control=['if'], data=[]]
elif 1j * self.scalar.imag == self.scalar:
return 1 / self.scalar.imag * ImagPart(self.range) # depends on [control=['if'], data=[]]
else:
# General case
inv_scalar = (1 / self.scalar).conjugate()
return inv_scalar.real * RealPart(self.range) + inv_scalar.imag * ImagPart(self.range) # depends on [control=['if'], data=[]]
else:
# Complex domain
return ComplexEmbedding(self.range, self.scalar.conjugate())
|
def mass2_from_tau0_tau3(tau0, tau3, f_lower):
r"""Returns the secondary mass from the given :math:`\tau_0, \tau_3`."""
mtotal = mtotal_from_tau0_tau3(tau0, tau3, f_lower)
eta = eta_from_tau0_tau3(tau0, tau3, f_lower)
return mass2_from_mtotal_eta(mtotal, eta)
|
def function[mass2_from_tau0_tau3, parameter[tau0, tau3, f_lower]]:
constant[Returns the secondary mass from the given :math:`\tau_0, \tau_3`.]
variable[mtotal] assign[=] call[name[mtotal_from_tau0_tau3], parameter[name[tau0], name[tau3], name[f_lower]]]
variable[eta] assign[=] call[name[eta_from_tau0_tau3], parameter[name[tau0], name[tau3], name[f_lower]]]
return[call[name[mass2_from_mtotal_eta], parameter[name[mtotal], name[eta]]]]
|
keyword[def] identifier[mass2_from_tau0_tau3] ( identifier[tau0] , identifier[tau3] , identifier[f_lower] ):
literal[string]
identifier[mtotal] = identifier[mtotal_from_tau0_tau3] ( identifier[tau0] , identifier[tau3] , identifier[f_lower] )
identifier[eta] = identifier[eta_from_tau0_tau3] ( identifier[tau0] , identifier[tau3] , identifier[f_lower] )
keyword[return] identifier[mass2_from_mtotal_eta] ( identifier[mtotal] , identifier[eta] )
|
def mass2_from_tau0_tau3(tau0, tau3, f_lower):
"""Returns the secondary mass from the given :math:`\\tau_0, \\tau_3`."""
mtotal = mtotal_from_tau0_tau3(tau0, tau3, f_lower)
eta = eta_from_tau0_tau3(tau0, tau3, f_lower)
return mass2_from_mtotal_eta(mtotal, eta)
|
def backend_query(self, **kwargs):
'''Build and return the :class:`stdnet.utils.async.BackendQuery`.
This is a lazy method in the sense that it is evaluated once only and its
result stored for future retrieval.'''
q = self.construct()
return q if isinstance(q, EmptyQuery) else q.backend_query(**kwargs)
|
def function[backend_query, parameter[self]]:
constant[Build and return the :class:`stdnet.utils.async.BackendQuery`.
This is a lazy method in the sense that it is evaluated once only and its
result stored for future retrieval.]
variable[q] assign[=] call[name[self].construct, parameter[]]
return[<ast.IfExp object at 0x7da1b0e95420>]
|
keyword[def] identifier[backend_query] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[q] = identifier[self] . identifier[construct] ()
keyword[return] identifier[q] keyword[if] identifier[isinstance] ( identifier[q] , identifier[EmptyQuery] ) keyword[else] identifier[q] . identifier[backend_query] (** identifier[kwargs] )
|
def backend_query(self, **kwargs):
"""Build and return the :class:`stdnet.utils.async.BackendQuery`.
This is a lazy method in the sense that it is evaluated once only and its
result stored for future retrieval."""
q = self.construct()
return q if isinstance(q, EmptyQuery) else q.backend_query(**kwargs)
|
def _handle_stop_dag(self, request):
""" The handler for the stop_dag request.
The stop_dag request adds a dag to the list of dags that should be stopped.
The dag will then stop queueing new tasks and will eventually stop running.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'name': the name of the dag that should be stopped
Returns:
Response: A response object containing the following fields:
- success: True if the dag was added successfully to the list
of dags that should be stopped.
"""
if (request.payload['name'] is not None) and \
(request.payload['name'] not in self._stop_dags):
self._stop_dags.append(request.payload['name'])
return Response(success=True, uid=request.uid)
|
def function[_handle_stop_dag, parameter[self, request]]:
constant[ The handler for the stop_dag request.
The stop_dag request adds a dag to the list of dags that should be stopped.
The dag will then stop queueing new tasks and will eventually stop running.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'name': the name of the dag that should be stopped
Returns:
Response: A response object containing the following fields:
- success: True if the dag was added successfully to the list
of dags that should be stopped.
]
if <ast.BoolOp object at 0x7da1b10d6770> begin[:]
call[name[self]._stop_dags.append, parameter[call[name[request].payload][constant[name]]]]
return[call[name[Response], parameter[]]]
|
keyword[def] identifier[_handle_stop_dag] ( identifier[self] , identifier[request] ):
literal[string]
keyword[if] ( identifier[request] . identifier[payload] [ literal[string] ] keyword[is] keyword[not] keyword[None] ) keyword[and] ( identifier[request] . identifier[payload] [ literal[string] ] keyword[not] keyword[in] identifier[self] . identifier[_stop_dags] ):
identifier[self] . identifier[_stop_dags] . identifier[append] ( identifier[request] . identifier[payload] [ literal[string] ])
keyword[return] identifier[Response] ( identifier[success] = keyword[True] , identifier[uid] = identifier[request] . identifier[uid] )
|
def _handle_stop_dag(self, request):
""" The handler for the stop_dag request.
The stop_dag request adds a dag to the list of dags that should be stopped.
The dag will then stop queueing new tasks and will eventually stop running.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'name': the name of the dag that should be stopped
Returns:
Response: A response object containing the following fields:
- success: True if the dag was added successfully to the list
of dags that should be stopped.
"""
if request.payload['name'] is not None and request.payload['name'] not in self._stop_dags:
self._stop_dags.append(request.payload['name']) # depends on [control=['if'], data=[]]
return Response(success=True, uid=request.uid)
|
def to_array(self):
"""
Serializes this InlineKeyboardButton to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(InlineKeyboardButton, self).to_array()
array['text'] = u(self.text) # py2: type unicode, py3: type str
if self.url is not None:
array['url'] = u(self.url) # py2: type unicode, py3: type str
if self.callback_data is not None:
array['callback_data'] = u(self.callback_data) # py2: type unicode, py3: type str
if self.switch_inline_query is not None:
array['switch_inline_query'] = u(self.switch_inline_query) # py2: type unicode, py3: type str
if self.switch_inline_query_current_chat is not None:
array['switch_inline_query_current_chat'] = u(self.switch_inline_query_current_chat) # py2: type unicode, py3: type str
if self.callback_game is not None:
array['callback_game'] = self.callback_game.to_array() # type CallbackGame
if self.pay is not None:
array['pay'] = bool(self.pay) # type bool
return array
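# Usage sketch (assumes a pytgbot-style constructor InlineKeyboardButton(text, url=None, ...);
# the values are illustrative only):
#     InlineKeyboardButton('Open site', url='https://example.com').to_array()
#     -> {'text': 'Open site', 'url': 'https://example.com'}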
|
def function[to_array, parameter[self]]:
constant[
Serializes this InlineKeyboardButton to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
]
variable[array] assign[=] call[call[name[super], parameter[name[InlineKeyboardButton], name[self]]].to_array, parameter[]]
call[name[array]][constant[text]] assign[=] call[name[u], parameter[name[self].text]]
if compare[name[self].url is_not constant[None]] begin[:]
call[name[array]][constant[url]] assign[=] call[name[u], parameter[name[self].url]]
if compare[name[self].callback_data is_not constant[None]] begin[:]
call[name[array]][constant[callback_data]] assign[=] call[name[u], parameter[name[self].callback_data]]
if compare[name[self].switch_inline_query is_not constant[None]] begin[:]
call[name[array]][constant[switch_inline_query]] assign[=] call[name[u], parameter[name[self].switch_inline_query]]
if compare[name[self].switch_inline_query_current_chat is_not constant[None]] begin[:]
call[name[array]][constant[switch_inline_query_current_chat]] assign[=] call[name[u], parameter[name[self].switch_inline_query_current_chat]]
if compare[name[self].callback_game is_not constant[None]] begin[:]
call[name[array]][constant[callback_game]] assign[=] call[name[self].callback_game.to_array, parameter[]]
if compare[name[self].pay is_not constant[None]] begin[:]
call[name[array]][constant[pay]] assign[=] call[name[bool], parameter[name[self].pay]]
return[name[array]]
|
keyword[def] identifier[to_array] ( identifier[self] ):
literal[string]
identifier[array] = identifier[super] ( identifier[InlineKeyboardButton] , identifier[self] ). identifier[to_array] ()
identifier[array] [ literal[string] ]= identifier[u] ( identifier[self] . identifier[text] )
keyword[if] identifier[self] . identifier[url] keyword[is] keyword[not] keyword[None] :
identifier[array] [ literal[string] ]= identifier[u] ( identifier[self] . identifier[url] )
keyword[if] identifier[self] . identifier[callback_data] keyword[is] keyword[not] keyword[None] :
identifier[array] [ literal[string] ]= identifier[u] ( identifier[self] . identifier[callback_data] )
keyword[if] identifier[self] . identifier[switch_inline_query] keyword[is] keyword[not] keyword[None] :
identifier[array] [ literal[string] ]= identifier[u] ( identifier[self] . identifier[switch_inline_query] )
keyword[if] identifier[self] . identifier[switch_inline_query_current_chat] keyword[is] keyword[not] keyword[None] :
identifier[array] [ literal[string] ]= identifier[u] ( identifier[self] . identifier[switch_inline_query_current_chat] )
keyword[if] identifier[self] . identifier[callback_game] keyword[is] keyword[not] keyword[None] :
identifier[array] [ literal[string] ]= identifier[self] . identifier[callback_game] . identifier[to_array] ()
keyword[if] identifier[self] . identifier[pay] keyword[is] keyword[not] keyword[None] :
identifier[array] [ literal[string] ]= identifier[bool] ( identifier[self] . identifier[pay] )
keyword[return] identifier[array]
|
def to_array(self):
"""
Serializes this InlineKeyboardButton to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(InlineKeyboardButton, self).to_array()
array['text'] = u(self.text) # py2: type unicode, py3: type str
if self.url is not None:
array['url'] = u(self.url) # py2: type unicode, py3: type str # depends on [control=['if'], data=[]]
if self.callback_data is not None:
array['callback_data'] = u(self.callback_data) # py2: type unicode, py3: type str # depends on [control=['if'], data=[]]
if self.switch_inline_query is not None:
array['switch_inline_query'] = u(self.switch_inline_query) # py2: type unicode, py3: type str # depends on [control=['if'], data=[]]
if self.switch_inline_query_current_chat is not None:
array['switch_inline_query_current_chat'] = u(self.switch_inline_query_current_chat) # py2: type unicode, py3: type str # depends on [control=['if'], data=[]]
if self.callback_game is not None:
array['callback_game'] = self.callback_game.to_array() # type CallbackGame # depends on [control=['if'], data=[]]
if self.pay is not None:
array['pay'] = bool(self.pay) # type bool # depends on [control=['if'], data=[]]
return array
|
def post_struct(UserStruct):
"""
Post Model
"""
db = UserStruct.User.db
class SlugNameMixin(object):
name = db.Column(db.String(250))
slug = db.Column(db.String(250), index=True, unique=True)
@classmethod
def get_by_slug(cls, slug):
"""
Return a post by slug
"""
return cls.all().filter(cls.slug == slug).first()
@classmethod
def new(cls, name, slug=None):
slug = utils.slug(name if not slug else slug)
return cls.create(name=name, slug=slug)
def rename(self, name, slug=None):
slug = utils.slug(name if not slug else slug)
return self.update(name=name, slug=slug)
class PostType(SlugNameMixin, db.Model):
@property
def total_posts(self):
return Post.all().filter(Post.type_id == self.id).count()
class PostCategory(SlugNameMixin, db.Model):
@property
def total_posts(self):
return PostPostCategory.all()\
.filter(PostPostCategory.category_id == self.id)\
.count()
class PostPostCategory(db.Model):
post_id = db.Column(db.Integer, db.ForeignKey("post.id"))
category_id = db.Column(db.Integer, db.ForeignKey(PostCategory.id))
@classmethod
def add(cls, post_id, category_id):
c = cls.all().filter(cls.post_id == post_id)\
.filter(cls.category_id == category_id)\
.first()
if not c:
cls.create(post_id=post_id, category_id=category_id)
@classmethod
def remove(cls, post_id, category_id):
c = cls.all().filter(cls.post_id == post_id)\
.filter(cls.category_id == category_id)\
.first()
if c:
c.delete(hard_delete=True)
class Post(db.Model):
user_id = db.Column(db.Integer, db.ForeignKey(UserStruct.User.id))
type_id = db.Column(db.Integer, db.ForeignKey(PostType.id))
parent_id = db.Column(db.Integer)
revision_id = db.Column(db.Integer)
title = db.Column(db.String(250))
slug = db.Column(db.String(250), index=True)
content = db.Column(db.Text)
excerpt = db.Column(db.Text)
is_public = db.Column(db.Boolean, index=True, default=False)
is_sticky = db.Column(db.Boolean, index=True, default=False)
is_published = db.Column(db.Boolean, index=True, default=True)
is_draft = db.Column(db.Boolean, index=True, default=False)
is_revision = db.Column(db.Boolean, default=False)
published_date = db.Column(db.DateTime)
author = db.relationship(UserStruct.User, backref="posts")
type = db.relationship(PostType, backref="posts")
categories = db.relationship(PostCategory,
secondary=PostPostCategory.__table__.name)
@classmethod
def new(cls, title, **kwargs):
"""
Insert a new post
"""
published_date = None
is_revision = False
is_published = False
is_draft = False
is_public = kwargs["is_public"] if "is_public" in kwargs else True
parent_id = int(kwargs["parent_id"]) if "parent_id" in kwargs else None
if "is_revision" in kwargs and kwargs["is_revision"] is True:
if not parent_id:
raise ModelError("'parent_id' is missing for revision")
                is_revision = True
is_public = False
elif "is_draft" in kwargs and kwargs["is_draft"] is True:
is_draft = True
is_public = False
elif "is_published" in kwargs and kwargs["is_published"] is True:
is_published = True
published_date = datetime.datetime.now()
slug = ""
if is_published or is_draft:
slug = cls.create_slug(title if "slug" not in kwargs else kwargs["slug"])
data = {
"title": title,
"slug": slug,
"content": kwargs["content"] if "content" in kwargs else "",
"excerpt": kwargs["excerpt"] if "excerpt" in kwargs else "",
"is_published": is_published,
"published_date": published_date,
"is_draft": is_draft,
"is_revision": is_revision,
"is_public": is_public,
"parent_id": parent_id,
"type_id": kwargs["type_id"] if "type_id" in kwargs else None
}
return cls.create(**data)
@classmethod
def get_published(cls, id=None, slug=None):
"""
Get a published post by id or slug
:param id: The id of the post
:param slug: str - The slug to look for
"""
post = None
if id:
post = cls.get(id)
elif slug:
post = cls.get_by_slug(slug)
return post if post and post.is_published else None
@classmethod
def get_published_by_category_slug(cls, slug):
"""
Query by category slug
:return SQLA :
"""
return cls.all()\
.join(PostPostCategory)\
.join(PostCategory)\
.filter(PostCategory.slug == slug)\
.filter(cls.is_published == True)
@classmethod
def get_published_by_type_slug(cls, slug):
"""
Query by type slug
:return SQLA :
"""
return cls.all()\
.join(PostType)\
.filter(PostType.slug == slug)\
.filter(cls.is_published == True)
@classmethod
def create_slug(cls, title):
slug_counter = 0
_slug = utils.slug(title).lower()
while True:
slug = _slug
                if slug_counter > 0:
                    slug += str(slug_counter)
                # increment on every attempt so the loop terminates even if the base slug is taken
                slug_counter += 1
if not cls.get_by_slug(slug):
break
return slug
@classmethod
def get_by_slug(cls, slug):
"""
Return a post by slug
"""
return cls.all().filter(cls.slug == slug).first()
def publish(self, published_date=None):
if self.is_draft:
data = {
"is_draft": False,
"is_published": True,
"published_date": published_date or datetime.datetime.now()
}
self.update(**data)
def set_type(self, type_id):
self.update(type_id=type_id)
def set_slug(self, title):
slug = utils.slug(title)
if title and slug != self.slug:
slug = self.create_slug(slug)
self.update(slug=slug)
def replace_categories(self, categories_list):
cats = PostPostCategory.all()\
.filter(PostPostCategory.post_id == self.id)
cats_list = [c.category_id for c in cats]
del_cats = list(set(cats_list) - set(categories_list))
new_cats = list(set(categories_list) - set(cats_list))
for dc in del_cats:
PostPostCategory.remove(post_id=self.id, category_id=dc)
for nc in new_cats:
PostPostCategory.add(post_id=self.id, category_id=nc)
@property
def status(self):
if self.is_published:
return "Published"
elif self.is_draft:
return "Draft"
elif self.is_revision:
return "Revision"
def delete_revisions(self):
"""
Delete all revisions
"""
try:
Post.all()\
                    .filter(Post.parent_id == self.id)\
.filter(Post.is_revision == True)\
.delete()
Post.db.commit()
except Exception as ex:
Post.db.rollback()
@property
def total_revisions(self):
return Post.all()\
                .filter(Post.parent_id == self.id)\
.filter(Post.is_revision == True)\
.count()
return utils.to_struct(Post=Post,
Category=PostCategory,
Type=PostType,
PostCategory=PostPostCategory
)
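Illustrative usage sketch: the UserStruct argument and the SQLAlchemy-style db it carries (judging from the db.Model/db.Column usage above) are assumed to be configured elsewhere:

    struct = post_struct(UserStruct)
    news = struct.Type.new("News")                        # slug is derived from the name
    post = struct.Post.new("Hello World",
                           content="First post",
                           is_published=True,
                           type_id=news.id)
    post.replace_categories([struct.Category.new("General").id])
    print(post.status)                                    # -> "Published"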
|
def function[post_struct, parameter[UserStruct]]:
constant[
Post Model
]
variable[db] assign[=] name[UserStruct].User.db
class class[SlugNameMixin, parameter[]] begin[:]
variable[name] assign[=] call[name[db].Column, parameter[call[name[db].String, parameter[constant[250]]]]]
variable[slug] assign[=] call[name[db].Column, parameter[call[name[db].String, parameter[constant[250]]]]]
def function[get_by_slug, parameter[cls, slug]]:
constant[
Return a post by slug
]
return[call[call[call[name[cls].all, parameter[]].filter, parameter[compare[name[cls].slug equal[==] name[slug]]]].first, parameter[]]]
def function[new, parameter[cls, name, slug]]:
variable[slug] assign[=] call[name[utils].slug, parameter[<ast.IfExp object at 0x7da1b24ffbe0>]]
return[call[name[cls].create, parameter[]]]
def function[rename, parameter[self, name, slug]]:
variable[slug] assign[=] call[name[utils].slug, parameter[<ast.IfExp object at 0x7da1b25eeb90>]]
return[call[name[self].update, parameter[]]]
class class[PostType, parameter[]] begin[:]
def function[total_posts, parameter[self]]:
return[call[call[call[name[Post].all, parameter[]].filter, parameter[compare[name[Post].type_id equal[==] name[self].id]]].count, parameter[]]]
class class[PostCategory, parameter[]] begin[:]
def function[total_posts, parameter[self]]:
return[call[call[call[name[PostPostCategory].all, parameter[]].filter, parameter[compare[name[PostPostCategory].category_id equal[==] name[self].id]]].count, parameter[]]]
class class[PostPostCategory, parameter[]] begin[:]
variable[post_id] assign[=] call[name[db].Column, parameter[name[db].Integer, call[name[db].ForeignKey, parameter[constant[post.id]]]]]
variable[category_id] assign[=] call[name[db].Column, parameter[name[db].Integer, call[name[db].ForeignKey, parameter[name[PostCategory].id]]]]
def function[add, parameter[cls, post_id, category_id]]:
variable[c] assign[=] call[call[call[call[name[cls].all, parameter[]].filter, parameter[compare[name[cls].post_id equal[==] name[post_id]]]].filter, parameter[compare[name[cls].category_id equal[==] name[category_id]]]].first, parameter[]]
if <ast.UnaryOp object at 0x7da1b25ed7b0> begin[:]
call[name[cls].create, parameter[]]
def function[remove, parameter[cls, post_id, category_id]]:
variable[c] assign[=] call[call[call[call[name[cls].all, parameter[]].filter, parameter[compare[name[cls].post_id equal[==] name[post_id]]]].filter, parameter[compare[name[cls].category_id equal[==] name[category_id]]]].first, parameter[]]
if name[c] begin[:]
call[name[c].delete, parameter[]]
class class[Post, parameter[]] begin[:]
variable[user_id] assign[=] call[name[db].Column, parameter[name[db].Integer, call[name[db].ForeignKey, parameter[name[UserStruct].User.id]]]]
variable[type_id] assign[=] call[name[db].Column, parameter[name[db].Integer, call[name[db].ForeignKey, parameter[name[PostType].id]]]]
variable[parent_id] assign[=] call[name[db].Column, parameter[name[db].Integer]]
variable[revision_id] assign[=] call[name[db].Column, parameter[name[db].Integer]]
variable[title] assign[=] call[name[db].Column, parameter[call[name[db].String, parameter[constant[250]]]]]
variable[slug] assign[=] call[name[db].Column, parameter[call[name[db].String, parameter[constant[250]]]]]
variable[content] assign[=] call[name[db].Column, parameter[name[db].Text]]
variable[excerpt] assign[=] call[name[db].Column, parameter[name[db].Text]]
variable[is_public] assign[=] call[name[db].Column, parameter[name[db].Boolean]]
variable[is_sticky] assign[=] call[name[db].Column, parameter[name[db].Boolean]]
variable[is_published] assign[=] call[name[db].Column, parameter[name[db].Boolean]]
variable[is_draft] assign[=] call[name[db].Column, parameter[name[db].Boolean]]
variable[is_revision] assign[=] call[name[db].Column, parameter[name[db].Boolean]]
variable[published_date] assign[=] call[name[db].Column, parameter[name[db].DateTime]]
variable[author] assign[=] call[name[db].relationship, parameter[name[UserStruct].User]]
variable[type] assign[=] call[name[db].relationship, parameter[name[PostType]]]
variable[categories] assign[=] call[name[db].relationship, parameter[name[PostCategory]]]
def function[new, parameter[cls, title]]:
constant[
Insert a new post
]
variable[published_date] assign[=] constant[None]
variable[is_revision] assign[=] constant[False]
variable[is_published] assign[=] constant[False]
variable[is_draft] assign[=] constant[False]
variable[is_public] assign[=] <ast.IfExp object at 0x7da1b25e96c0>
variable[parent_id] assign[=] <ast.IfExp object at 0x7da1b25e9210>
if <ast.BoolOp object at 0x7da1b25eae60> begin[:]
if <ast.UnaryOp object at 0x7da1b25ebac0> begin[:]
<ast.Raise object at 0x7da1b25e9c60>
variable[is_revision] assign[=] constant[True]
variable[is_public] assign[=] constant[False]
variable[slug] assign[=] constant[]
if <ast.BoolOp object at 0x7da1b25d2f20> begin[:]
variable[slug] assign[=] call[name[cls].create_slug, parameter[<ast.IfExp object at 0x7da1b25d26b0>]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b25d2bf0>, <ast.Constant object at 0x7da1b25d2890>, <ast.Constant object at 0x7da1b25d2980>, <ast.Constant object at 0x7da1b25d1060>, <ast.Constant object at 0x7da1b25d2a10>, <ast.Constant object at 0x7da1b25d2a40>, <ast.Constant object at 0x7da1b25d2cb0>, <ast.Constant object at 0x7da1b25d1090>, <ast.Constant object at 0x7da1b25d0f70>, <ast.Constant object at 0x7da1b25d2950>, <ast.Constant object at 0x7da1b25d2e60>], [<ast.Name object at 0x7da1b25d2d40>, <ast.Name object at 0x7da1b25d0fa0>, <ast.IfExp object at 0x7da1b25d3160>, <ast.IfExp object at 0x7da1b25d3520>, <ast.Name object at 0x7da1b25d2e00>, <ast.Name object at 0x7da1b25d33d0>, <ast.Name object at 0x7da1b25d2d10>, <ast.Name object at 0x7da1b25d3190>, <ast.Name object at 0x7da1b25d00a0>, <ast.Name object at 0x7da1b25d2770>, <ast.IfExp object at 0x7da1b25d2710>]]
return[call[name[cls].create, parameter[]]]
def function[get_published, parameter[cls, id, slug]]:
constant[
Get a published post by id or slug
:param id: The id of the post
:param slug: str - The slug to look for
]
variable[post] assign[=] constant[None]
if name[id] begin[:]
variable[post] assign[=] call[name[cls].get, parameter[name[id]]]
return[<ast.IfExp object at 0x7da1b25d0670>]
def function[get_published_by_category_slug, parameter[cls, slug]]:
constant[
Query by category slug
:return SQLA :
]
return[call[call[call[call[call[name[cls].all, parameter[]].join, parameter[name[PostPostCategory]]].join, parameter[name[PostCategory]]].filter, parameter[compare[name[PostCategory].slug equal[==] name[slug]]]].filter, parameter[compare[name[cls].is_published equal[==] constant[True]]]]]
def function[get_published_by_type_slug, parameter[cls, slug]]:
constant[
Query by type slug
:return SQLA :
]
return[call[call[call[call[name[cls].all, parameter[]].join, parameter[name[PostType]]].filter, parameter[compare[name[PostType].slug equal[==] name[slug]]]].filter, parameter[compare[name[cls].is_published equal[==] constant[True]]]]]
def function[create_slug, parameter[cls, title]]:
variable[slug_counter] assign[=] constant[0]
variable[_slug] assign[=] call[call[name[utils].slug, parameter[name[title]]].lower, parameter[]]
while constant[True] begin[:]
variable[slug] assign[=] name[_slug]
if compare[name[slug_counter] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b25968f0>
<ast.AugAssign object at 0x7da1b2594dc0>
if <ast.UnaryOp object at 0x7da1b2595390> begin[:]
break
return[name[slug]]
def function[get_by_slug, parameter[cls, slug]]:
constant[
Return a post by slug
]
return[call[call[call[name[cls].all, parameter[]].filter, parameter[compare[name[cls].slug equal[==] name[slug]]]].first, parameter[]]]
def function[publish, parameter[self, published_date]]:
if name[self].is_draft begin[:]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b2595b10>, <ast.Constant object at 0x7da1b2597160>, <ast.Constant object at 0x7da1b25978b0>], [<ast.Constant object at 0x7da1b25946d0>, <ast.Constant object at 0x7da1b25971c0>, <ast.BoolOp object at 0x7da1b2597e20>]]
call[name[self].update, parameter[]]
def function[set_type, parameter[self, type_id]]:
call[name[self].update, parameter[]]
def function[set_slug, parameter[self, title]]:
variable[slug] assign[=] call[name[utils].slug, parameter[name[title]]]
if <ast.BoolOp object at 0x7da1b25942e0> begin[:]
variable[slug] assign[=] call[name[self].create_slug, parameter[name[slug]]]
call[name[self].update, parameter[]]
def function[replace_categories, parameter[self, categories_list]]:
variable[cats] assign[=] call[call[name[PostPostCategory].all, parameter[]].filter, parameter[compare[name[PostPostCategory].post_id equal[==] name[self].id]]]
variable[cats_list] assign[=] <ast.ListComp object at 0x7da1b2594820>
variable[del_cats] assign[=] call[name[list], parameter[binary_operation[call[name[set], parameter[name[cats_list]]] - call[name[set], parameter[name[categories_list]]]]]]
variable[new_cats] assign[=] call[name[list], parameter[binary_operation[call[name[set], parameter[name[categories_list]]] - call[name[set], parameter[name[cats_list]]]]]]
for taget[name[dc]] in starred[name[del_cats]] begin[:]
call[name[PostPostCategory].remove, parameter[]]
for taget[name[nc]] in starred[name[new_cats]] begin[:]
call[name[PostPostCategory].add, parameter[]]
def function[status, parameter[self]]:
if name[self].is_published begin[:]
return[constant[Published]]
def function[delete_revisions, parameter[self]]:
constant[
Delete all revisions
]
<ast.Try object at 0x7da1b259ce80>
def function[total_revisions, parameter[self]]:
return[call[call[call[call[name[Post].all, parameter[]].filter, parameter[compare[name[Post].post_id equal[==] name[self].id]]].filter, parameter[compare[name[Post].is_revision equal[==] constant[True]]]].count, parameter[]]]
return[call[name[utils].to_struct, parameter[]]]
|
keyword[def] identifier[post_struct] ( identifier[UserStruct] ):
literal[string]
identifier[db] = identifier[UserStruct] . identifier[User] . identifier[db]
keyword[class] identifier[SlugNameMixin] ( identifier[object] ):
identifier[name] = identifier[db] . identifier[Column] ( identifier[db] . identifier[String] ( literal[int] ))
identifier[slug] = identifier[db] . identifier[Column] ( identifier[db] . identifier[String] ( literal[int] ), identifier[index] = keyword[True] , identifier[unique] = keyword[True] )
@ identifier[classmethod]
keyword[def] identifier[get_by_slug] ( identifier[cls] , identifier[slug] ):
literal[string]
keyword[return] identifier[cls] . identifier[all] (). identifier[filter] ( identifier[cls] . identifier[slug] == identifier[slug] ). identifier[first] ()
@ identifier[classmethod]
keyword[def] identifier[new] ( identifier[cls] , identifier[name] , identifier[slug] = keyword[None] ):
identifier[slug] = identifier[utils] . identifier[slug] ( identifier[name] keyword[if] keyword[not] identifier[slug] keyword[else] identifier[slug] )
keyword[return] identifier[cls] . identifier[create] ( identifier[name] = identifier[name] , identifier[slug] = identifier[slug] )
keyword[def] identifier[rename] ( identifier[self] , identifier[name] , identifier[slug] = keyword[None] ):
identifier[slug] = identifier[utils] . identifier[slug] ( identifier[name] keyword[if] keyword[not] identifier[slug] keyword[else] identifier[slug] )
keyword[return] identifier[self] . identifier[update] ( identifier[name] = identifier[name] , identifier[slug] = identifier[slug] )
keyword[class] identifier[PostType] ( identifier[SlugNameMixin] , identifier[db] . identifier[Model] ):
@ identifier[property]
keyword[def] identifier[total_posts] ( identifier[self] ):
keyword[return] identifier[Post] . identifier[all] (). identifier[filter] ( identifier[Post] . identifier[type_id] == identifier[self] . identifier[id] ). identifier[count] ()
keyword[class] identifier[PostCategory] ( identifier[SlugNameMixin] , identifier[db] . identifier[Model] ):
@ identifier[property]
keyword[def] identifier[total_posts] ( identifier[self] ):
keyword[return] identifier[PostPostCategory] . identifier[all] (). identifier[filter] ( identifier[PostPostCategory] . identifier[category_id] == identifier[self] . identifier[id] ). identifier[count] ()
keyword[class] identifier[PostPostCategory] ( identifier[db] . identifier[Model] ):
identifier[post_id] = identifier[db] . identifier[Column] ( identifier[db] . identifier[Integer] , identifier[db] . identifier[ForeignKey] ( literal[string] ))
identifier[category_id] = identifier[db] . identifier[Column] ( identifier[db] . identifier[Integer] , identifier[db] . identifier[ForeignKey] ( identifier[PostCategory] . identifier[id] ))
@ identifier[classmethod]
keyword[def] identifier[add] ( identifier[cls] , identifier[post_id] , identifier[category_id] ):
identifier[c] = identifier[cls] . identifier[all] (). identifier[filter] ( identifier[cls] . identifier[post_id] == identifier[post_id] ). identifier[filter] ( identifier[cls] . identifier[category_id] == identifier[category_id] ). identifier[first] ()
keyword[if] keyword[not] identifier[c] :
identifier[cls] . identifier[create] ( identifier[post_id] = identifier[post_id] , identifier[category_id] = identifier[category_id] )
@ identifier[classmethod]
keyword[def] identifier[remove] ( identifier[cls] , identifier[post_id] , identifier[category_id] ):
identifier[c] = identifier[cls] . identifier[all] (). identifier[filter] ( identifier[cls] . identifier[post_id] == identifier[post_id] ). identifier[filter] ( identifier[cls] . identifier[category_id] == identifier[category_id] ). identifier[first] ()
keyword[if] identifier[c] :
identifier[c] . identifier[delete] ( identifier[hard_delete] = keyword[True] )
keyword[class] identifier[Post] ( identifier[db] . identifier[Model] ):
identifier[user_id] = identifier[db] . identifier[Column] ( identifier[db] . identifier[Integer] , identifier[db] . identifier[ForeignKey] ( identifier[UserStruct] . identifier[User] . identifier[id] ))
identifier[type_id] = identifier[db] . identifier[Column] ( identifier[db] . identifier[Integer] , identifier[db] . identifier[ForeignKey] ( identifier[PostType] . identifier[id] ))
identifier[parent_id] = identifier[db] . identifier[Column] ( identifier[db] . identifier[Integer] )
identifier[revision_id] = identifier[db] . identifier[Column] ( identifier[db] . identifier[Integer] )
identifier[title] = identifier[db] . identifier[Column] ( identifier[db] . identifier[String] ( literal[int] ))
identifier[slug] = identifier[db] . identifier[Column] ( identifier[db] . identifier[String] ( literal[int] ), identifier[index] = keyword[True] )
identifier[content] = identifier[db] . identifier[Column] ( identifier[db] . identifier[Text] )
identifier[excerpt] = identifier[db] . identifier[Column] ( identifier[db] . identifier[Text] )
identifier[is_public] = identifier[db] . identifier[Column] ( identifier[db] . identifier[Boolean] , identifier[index] = keyword[True] , identifier[default] = keyword[False] )
identifier[is_sticky] = identifier[db] . identifier[Column] ( identifier[db] . identifier[Boolean] , identifier[index] = keyword[True] , identifier[default] = keyword[False] )
identifier[is_published] = identifier[db] . identifier[Column] ( identifier[db] . identifier[Boolean] , identifier[index] = keyword[True] , identifier[default] = keyword[True] )
identifier[is_draft] = identifier[db] . identifier[Column] ( identifier[db] . identifier[Boolean] , identifier[index] = keyword[True] , identifier[default] = keyword[False] )
identifier[is_revision] = identifier[db] . identifier[Column] ( identifier[db] . identifier[Boolean] , identifier[default] = keyword[False] )
identifier[published_date] = identifier[db] . identifier[Column] ( identifier[db] . identifier[DateTime] )
identifier[author] = identifier[db] . identifier[relationship] ( identifier[UserStruct] . identifier[User] , identifier[backref] = literal[string] )
identifier[type] = identifier[db] . identifier[relationship] ( identifier[PostType] , identifier[backref] = literal[string] )
identifier[categories] = identifier[db] . identifier[relationship] ( identifier[PostCategory] ,
identifier[secondary] = identifier[PostPostCategory] . identifier[__table__] . identifier[name] )
@ identifier[classmethod]
keyword[def] identifier[new] ( identifier[cls] , identifier[title] ,** identifier[kwargs] ):
literal[string]
identifier[published_date] = keyword[None]
identifier[is_revision] = keyword[False]
identifier[is_published] = keyword[False]
identifier[is_draft] = keyword[False]
identifier[is_public] = identifier[kwargs] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[else] keyword[True]
identifier[parent_id] = identifier[int] ( identifier[kwargs] [ literal[string] ]) keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[else] keyword[None]
keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[and] identifier[kwargs] [ literal[string] ] keyword[is] keyword[True] :
keyword[if] keyword[not] identifier[parent_id] :
keyword[raise] identifier[ModelError] ( literal[string] )
identifier[is_revision] = keyword[True]
identifier[is_public] = keyword[False]
keyword[elif] literal[string] keyword[in] identifier[kwargs] keyword[and] identifier[kwargs] [ literal[string] ] keyword[is] keyword[True] :
identifier[is_draft] = keyword[True]
identifier[is_public] = keyword[False]
keyword[elif] literal[string] keyword[in] identifier[kwargs] keyword[and] identifier[kwargs] [ literal[string] ] keyword[is] keyword[True] :
identifier[is_published] = keyword[True]
identifier[published_date] = identifier[datetime] . identifier[datetime] . identifier[now] ()
identifier[slug] = literal[string]
keyword[if] identifier[is_published] keyword[or] identifier[is_draft] :
identifier[slug] = identifier[cls] . identifier[create_slug] ( identifier[title] keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] keyword[else] identifier[kwargs] [ literal[string] ])
identifier[data] ={
literal[string] : identifier[title] ,
literal[string] : identifier[slug] ,
literal[string] : identifier[kwargs] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[else] literal[string] ,
literal[string] : identifier[kwargs] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[else] literal[string] ,
literal[string] : identifier[is_published] ,
literal[string] : identifier[published_date] ,
literal[string] : identifier[is_draft] ,
literal[string] : identifier[is_revision] ,
literal[string] : identifier[is_public] ,
literal[string] : identifier[parent_id] ,
literal[string] : identifier[kwargs] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[else] keyword[None]
}
keyword[return] identifier[cls] . identifier[create] (** identifier[data] )
@ identifier[classmethod]
keyword[def] identifier[get_published] ( identifier[cls] , identifier[id] = keyword[None] , identifier[slug] = keyword[None] ):
literal[string]
identifier[post] = keyword[None]
keyword[if] identifier[id] :
identifier[post] = identifier[cls] . identifier[get] ( identifier[id] )
keyword[elif] identifier[slug] :
identifier[post] = identifier[cls] . identifier[get_by_slug] ( identifier[slug] )
keyword[return] identifier[post] keyword[if] identifier[post] keyword[and] identifier[post] . identifier[is_published] keyword[else] keyword[None]
@ identifier[classmethod]
keyword[def] identifier[get_published_by_category_slug] ( identifier[cls] , identifier[slug] ):
literal[string]
keyword[return] identifier[cls] . identifier[all] (). identifier[join] ( identifier[PostPostCategory] ). identifier[join] ( identifier[PostCategory] ). identifier[filter] ( identifier[PostCategory] . identifier[slug] == identifier[slug] ). identifier[filter] ( identifier[cls] . identifier[is_published] == keyword[True] )
@ identifier[classmethod]
keyword[def] identifier[get_published_by_type_slug] ( identifier[cls] , identifier[slug] ):
literal[string]
keyword[return] identifier[cls] . identifier[all] (). identifier[join] ( identifier[PostType] ). identifier[filter] ( identifier[PostType] . identifier[slug] == identifier[slug] ). identifier[filter] ( identifier[cls] . identifier[is_published] == keyword[True] )
@ identifier[classmethod]
keyword[def] identifier[create_slug] ( identifier[cls] , identifier[title] ):
identifier[slug_counter] = literal[int]
identifier[_slug] = identifier[utils] . identifier[slug] ( identifier[title] ). identifier[lower] ()
keyword[while] keyword[True] :
identifier[slug] = identifier[_slug]
keyword[if] identifier[slug_counter] > literal[int] :
identifier[slug] += identifier[str] ( identifier[slug_counter] )
identifier[slug_counter] += literal[int]
keyword[if] keyword[not] identifier[cls] . identifier[get_by_slug] ( identifier[slug] ):
keyword[break]
keyword[return] identifier[slug]
@ identifier[classmethod]
keyword[def] identifier[get_by_slug] ( identifier[cls] , identifier[slug] ):
literal[string]
keyword[return] identifier[cls] . identifier[all] (). identifier[filter] ( identifier[cls] . identifier[slug] == identifier[slug] ). identifier[first] ()
keyword[def] identifier[publish] ( identifier[self] , identifier[published_date] = keyword[None] ):
keyword[if] identifier[self] . identifier[is_draft] :
identifier[data] ={
literal[string] : keyword[False] ,
literal[string] : keyword[True] ,
literal[string] : identifier[published_date] keyword[or] identifier[datetime] . identifier[datetime] . identifier[now] ()
}
identifier[self] . identifier[update] (** identifier[data] )
keyword[def] identifier[set_type] ( identifier[self] , identifier[type_id] ):
identifier[self] . identifier[update] ( identifier[type_id] = identifier[type_id] )
keyword[def] identifier[set_slug] ( identifier[self] , identifier[title] ):
identifier[slug] = identifier[utils] . identifier[slug] ( identifier[title] )
keyword[if] identifier[title] keyword[and] identifier[slug] != identifier[self] . identifier[slug] :
identifier[slug] = identifier[self] . identifier[create_slug] ( identifier[slug] )
identifier[self] . identifier[update] ( identifier[slug] = identifier[slug] )
keyword[def] identifier[replace_categories] ( identifier[self] , identifier[categories_list] ):
identifier[cats] = identifier[PostPostCategory] . identifier[all] (). identifier[filter] ( identifier[PostPostCategory] . identifier[post_id] == identifier[self] . identifier[id] )
identifier[cats_list] =[ identifier[c] . identifier[category_id] keyword[for] identifier[c] keyword[in] identifier[cats] ]
identifier[del_cats] = identifier[list] ( identifier[set] ( identifier[cats_list] )- identifier[set] ( identifier[categories_list] ))
identifier[new_cats] = identifier[list] ( identifier[set] ( identifier[categories_list] )- identifier[set] ( identifier[cats_list] ))
keyword[for] identifier[dc] keyword[in] identifier[del_cats] :
identifier[PostPostCategory] . identifier[remove] ( identifier[post_id] = identifier[self] . identifier[id] , identifier[category_id] = identifier[dc] )
keyword[for] identifier[nc] keyword[in] identifier[new_cats] :
identifier[PostPostCategory] . identifier[add] ( identifier[post_id] = identifier[self] . identifier[id] , identifier[category_id] = identifier[nc] )
@ identifier[property]
keyword[def] identifier[status] ( identifier[self] ):
keyword[if] identifier[self] . identifier[is_published] :
keyword[return] literal[string]
keyword[elif] identifier[self] . identifier[is_draft] :
keyword[return] literal[string]
keyword[elif] identifier[self] . identifier[is_revision] :
keyword[return] literal[string]
keyword[def] identifier[delete_revisions] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[Post] . identifier[all] (). identifier[filter] ( identifier[Post] . identifier[post_id] == identifier[self] . identifier[id] ). identifier[filter] ( identifier[Post] . identifier[is_revision] == keyword[True] ). identifier[delete] ()
identifier[Post] . identifier[db] . identifier[commit] ()
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[Post] . identifier[db] . identifier[rollback] ()
@ identifier[property]
keyword[def] identifier[total_revisions] ( identifier[self] ):
keyword[return] identifier[Post] . identifier[all] (). identifier[filter] ( identifier[Post] . identifier[post_id] == identifier[self] . identifier[id] ). identifier[filter] ( identifier[Post] . identifier[is_revision] == keyword[True] ). identifier[count] ()
keyword[return] identifier[utils] . identifier[to_struct] ( identifier[Post] = identifier[Post] ,
identifier[Category] = identifier[PostCategory] ,
identifier[Type] = identifier[PostType] ,
identifier[PostCategory] = identifier[PostPostCategory]
)
|
def post_struct(UserStruct):
"""
Post Model
"""
db = UserStruct.User.db
class SlugNameMixin(object):
name = db.Column(db.String(250))
slug = db.Column(db.String(250), index=True, unique=True)
@classmethod
def get_by_slug(cls, slug):
"""
Return a post by slug
"""
return cls.all().filter(cls.slug == slug).first()
@classmethod
def new(cls, name, slug=None):
slug = utils.slug(name if not slug else slug)
return cls.create(name=name, slug=slug)
def rename(self, name, slug=None):
slug = utils.slug(name if not slug else slug)
return self.update(name=name, slug=slug)
class PostType(SlugNameMixin, db.Model):
@property
def total_posts(self):
return Post.all().filter(Post.type_id == self.id).count()
class PostCategory(SlugNameMixin, db.Model):
@property
def total_posts(self):
return PostPostCategory.all().filter(PostPostCategory.category_id == self.id).count()
class PostPostCategory(db.Model):
post_id = db.Column(db.Integer, db.ForeignKey('post.id'))
category_id = db.Column(db.Integer, db.ForeignKey(PostCategory.id))
@classmethod
def add(cls, post_id, category_id):
c = cls.all().filter(cls.post_id == post_id).filter(cls.category_id == category_id).first()
if not c:
cls.create(post_id=post_id, category_id=category_id) # depends on [control=['if'], data=[]]
@classmethod
def remove(cls, post_id, category_id):
c = cls.all().filter(cls.post_id == post_id).filter(cls.category_id == category_id).first()
if c:
c.delete(hard_delete=True) # depends on [control=['if'], data=[]]
class Post(db.Model):
user_id = db.Column(db.Integer, db.ForeignKey(UserStruct.User.id))
type_id = db.Column(db.Integer, db.ForeignKey(PostType.id))
parent_id = db.Column(db.Integer)
revision_id = db.Column(db.Integer)
title = db.Column(db.String(250))
slug = db.Column(db.String(250), index=True)
content = db.Column(db.Text)
excerpt = db.Column(db.Text)
is_public = db.Column(db.Boolean, index=True, default=False)
is_sticky = db.Column(db.Boolean, index=True, default=False)
is_published = db.Column(db.Boolean, index=True, default=True)
is_draft = db.Column(db.Boolean, index=True, default=False)
is_revision = db.Column(db.Boolean, default=False)
published_date = db.Column(db.DateTime)
author = db.relationship(UserStruct.User, backref='posts')
type = db.relationship(PostType, backref='posts')
categories = db.relationship(PostCategory, secondary=PostPostCategory.__table__.name)
@classmethod
def new(cls, title, **kwargs):
"""
Insert a new post
"""
published_date = None
is_revision = False
is_published = False
is_draft = False
is_public = kwargs['is_public'] if 'is_public' in kwargs else True
parent_id = int(kwargs['parent_id']) if 'parent_id' in kwargs else None
if 'is_revision' in kwargs and kwargs['is_revision'] is True:
if not parent_id:
raise ModelError("'parent_id' is missing for revision") # depends on [control=['if'], data=[]]
is_revision = True
is_public = False # depends on [control=['if'], data=[]]
elif 'is_draft' in kwargs and kwargs['is_draft'] is True:
is_draft = True
is_public = False # depends on [control=['if'], data=[]]
elif 'is_published' in kwargs and kwargs['is_published'] is True:
is_published = True
published_date = datetime.datetime.now() # depends on [control=['if'], data=[]]
slug = ''
if is_published or is_draft:
slug = cls.create_slug(title if 'slug' not in kwargs else kwargs['slug']) # depends on [control=['if'], data=[]]
data = {'title': title, 'slug': slug, 'content': kwargs['content'] if 'content' in kwargs else '', 'excerpt': kwargs['excerpt'] if 'excerpt' in kwargs else '', 'is_published': is_published, 'published_date': published_date, 'is_draft': is_draft, 'is_revision': is_revision, 'is_public': is_public, 'parent_id': parent_id, 'type_id': kwargs['type_id'] if 'type_id' in kwargs else None}
return cls.create(**data)
@classmethod
def get_published(cls, id=None, slug=None):
"""
Get a published post by id or slug
:param id: The id of the post
:param slug: str - The slug to look for
"""
post = None
if id:
post = cls.get(id) # depends on [control=['if'], data=[]]
elif slug:
post = cls.get_by_slug(slug) # depends on [control=['if'], data=[]]
return post if post and post.is_published else None
@classmethod
def get_published_by_category_slug(cls, slug):
"""
Query by category slug
:return SQLA :
"""
return cls.all().join(PostPostCategory).join(PostCategory).filter(PostCategory.slug == slug).filter(cls.is_published == True)
@classmethod
def get_published_by_type_slug(cls, slug):
"""
Query by type slug
:return SQLA :
"""
return cls.all().join(PostType).filter(PostType.slug == slug).filter(cls.is_published == True)
@classmethod
def create_slug(cls, title):
slug_counter = 0
_slug = utils.slug(title).lower()
while True:
slug = _slug
if slug_counter > 0:
slug += str(slug_counter)
slug_counter += 1 # depends on [control=['if'], data=['slug_counter']]
if not cls.get_by_slug(slug):
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return slug
@classmethod
def get_by_slug(cls, slug):
"""
Return a post by slug
"""
return cls.all().filter(cls.slug == slug).first()
def publish(self, published_date=None):
if self.is_draft:
data = {'is_draft': False, 'is_published': True, 'published_date': published_date or datetime.datetime.now()}
self.update(**data) # depends on [control=['if'], data=[]]
def set_type(self, type_id):
self.update(type_id=type_id)
def set_slug(self, title):
slug = utils.slug(title)
if title and slug != self.slug:
slug = self.create_slug(slug)
self.update(slug=slug) # depends on [control=['if'], data=[]]
def replace_categories(self, categories_list):
cats = PostPostCategory.all().filter(PostPostCategory.post_id == self.id)
cats_list = [c.category_id for c in cats]
del_cats = list(set(cats_list) - set(categories_list))
new_cats = list(set(categories_list) - set(cats_list))
for dc in del_cats:
PostPostCategory.remove(post_id=self.id, category_id=dc) # depends on [control=['for'], data=['dc']]
for nc in new_cats:
PostPostCategory.add(post_id=self.id, category_id=nc) # depends on [control=['for'], data=['nc']]
@property
def status(self):
if self.is_published:
return 'Published' # depends on [control=['if'], data=[]]
elif self.is_draft:
return 'Draft' # depends on [control=['if'], data=[]]
elif self.is_revision:
return 'Revision' # depends on [control=['if'], data=[]]
def delete_revisions(self):
"""
Delete all revisions
"""
try:
                Post.all().filter(Post.parent_id == self.id).filter(Post.is_revision == True).delete()
Post.db.commit() # depends on [control=['try'], data=[]]
except Exception as ex:
Post.db.rollback() # depends on [control=['except'], data=[]]
@property
def total_revisions(self):
            return Post.all().filter(Post.parent_id == self.id).filter(Post.is_revision == True).count()
return utils.to_struct(Post=Post, Category=PostCategory, Type=PostType, PostCategory=PostPostCategory)
|
def flatten_all(list_of_list):
"""Flatten arbitrary depth of nesting. Good for unknown nesting structure
iterable object.
Usage::
>>> flatten_all([[0, 1], [2, 3, [4, 5], [6, 7, 8]], [9,]])
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
for i in list_of_list:
if hasattr(i, "__iter__"):
for j in flatten_all(i):
yield j
else:
yield i
|
def function[flatten_all, parameter[list_of_list]]:
constant[Flatten arbitrary depth of nesting. Good for unknown nesting structure
iterable object.
Usage::
>>> flatten_all([[0, 1], [2, 3, [4, 5], [6, 7, 8]], [9,]])
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
]
for taget[name[i]] in starred[name[list_of_list]] begin[:]
if call[name[hasattr], parameter[name[i], constant[__iter__]]] begin[:]
for taget[name[j]] in starred[call[name[flatten_all], parameter[name[i]]]] begin[:]
<ast.Yield object at 0x7da20cabdff0>
|
keyword[def] identifier[flatten_all] ( identifier[list_of_list] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[list_of_list] :
keyword[if] identifier[hasattr] ( identifier[i] , literal[string] ):
keyword[for] identifier[j] keyword[in] identifier[flatten_all] ( identifier[i] ):
keyword[yield] identifier[j]
keyword[else] :
keyword[yield] identifier[i]
|
def flatten_all(list_of_list):
"""Flatten arbitrary depth of nesting. Good for unknown nesting structure
iterable object.
Usage::
>>> flatten_all([[0, 1], [2, 3, [4, 5], [6, 7, 8]], [9,]])
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
for i in list_of_list:
if hasattr(i, '__iter__'):
for j in flatten_all(i):
yield j # depends on [control=['for'], data=['j']] # depends on [control=['if'], data=[]]
else:
yield i # depends on [control=['for'], data=['i']]
|
def convert_fully_connected(node, **kwargs):
"""Map MXNet's FullyConnected operator attributes to onnx's Gemm operator
    and return the list of created nodes.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
initializer = kwargs["initializer"]
no_bias = get_boolean_attribute_value(attrs, "no_bias")
fcnode = []
op_name = "flatten_" + str(kwargs["idx"])
flatten_node = onnx.helper.make_node(
'Flatten',
inputs=[input_nodes[0]],
outputs=[op_name],
name=op_name
)
input_nodes[0] = op_name
fcnode.append(flatten_node)
if no_bias:
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]
bias_name = "bias" + str(kwargs["idx"])
tensor_node = onnx.helper.make_tensor_value_info(bias_name, data_type, (1,))
initializer.append(
onnx.helper.make_tensor(
name=bias_name,
data_type=data_type,
dims=(1,),
vals=[0],
raw=False,
)
)
input_nodes.append(bias_name)
fcnode.append(tensor_node)
node = onnx.helper.make_node(
"Gemm",
input_nodes, # input (A, B, C) - C can be in place
[name], # output
alpha=1.0,
beta=1.0,
transA=False,
transB=True,
name=name
)
fcnode.append(node)
return fcnode
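The conversion above expresses FullyConnected as flatten(x) * W^T + b, i.e. a Gemm with transB=True, with a zero bias inserted when no_bias is set. A self-contained NumPy sketch of that equivalence, for illustration only and independent of the MXNet/ONNX helpers used above:

    import numpy as np

    x = np.random.randn(2, 3, 4)          # input with trailing dims to flatten
    W = np.random.randn(5, 12)            # (num_hidden, input_dim), MXNet weight layout
    b = np.random.randn(5)

    flat = x.reshape(x.shape[0], -1)      # what the inserted Flatten node does
    y = 1.0 * (flat @ W.T) + 1.0 * b      # Gemm: alpha*A*B^T + beta*C with transB=True
    assert y.shape == (2, 5)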
|
def function[convert_fully_connected, parameter[node]]:
constant[Map MXNet's FullyConnected operator attributes to onnx's Gemm operator
and return the created node.
]
<ast.Tuple object at 0x7da1b1ef1cc0> assign[=] call[name[get_inputs], parameter[name[node], name[kwargs]]]
variable[initializer] assign[=] call[name[kwargs]][constant[initializer]]
variable[no_bias] assign[=] call[name[get_boolean_attribute_value], parameter[name[attrs], constant[no_bias]]]
variable[fcnode] assign[=] list[[]]
variable[op_name] assign[=] binary_operation[constant[flatten_] + call[name[str], parameter[call[name[kwargs]][constant[idx]]]]]
variable[flatten_node] assign[=] call[name[onnx].helper.make_node, parameter[constant[Flatten]]]
call[name[input_nodes]][constant[0]] assign[=] name[op_name]
call[name[fcnode].append, parameter[name[flatten_node]]]
if name[no_bias] begin[:]
variable[data_type] assign[=] call[name[onnx].mapping.NP_TYPE_TO_TENSOR_TYPE][call[name[np].dtype, parameter[constant[int64]]]]
variable[bias_name] assign[=] binary_operation[constant[bias] + call[name[str], parameter[call[name[kwargs]][constant[idx]]]]]
variable[tensor_node] assign[=] call[name[onnx].helper.make_tensor_value_info, parameter[name[bias_name], name[data_type], tuple[[<ast.Constant object at 0x7da1b20131c0>]]]]
call[name[initializer].append, parameter[call[name[onnx].helper.make_tensor, parameter[]]]]
call[name[input_nodes].append, parameter[name[bias_name]]]
call[name[fcnode].append, parameter[name[tensor_node]]]
variable[node] assign[=] call[name[onnx].helper.make_node, parameter[constant[Gemm], name[input_nodes], list[[<ast.Name object at 0x7da1b2013670>]]]]
call[name[fcnode].append, parameter[name[node]]]
return[name[fcnode]]
|
keyword[def] identifier[convert_fully_connected] ( identifier[node] ,** identifier[kwargs] ):
literal[string]
identifier[name] , identifier[input_nodes] , identifier[attrs] = identifier[get_inputs] ( identifier[node] , identifier[kwargs] )
identifier[initializer] = identifier[kwargs] [ literal[string] ]
identifier[no_bias] = identifier[get_boolean_attribute_value] ( identifier[attrs] , literal[string] )
identifier[fcnode] =[]
identifier[op_name] = literal[string] + identifier[str] ( identifier[kwargs] [ literal[string] ])
identifier[flatten_node] = identifier[onnx] . identifier[helper] . identifier[make_node] (
literal[string] ,
identifier[inputs] =[ identifier[input_nodes] [ literal[int] ]],
identifier[outputs] =[ identifier[op_name] ],
identifier[name] = identifier[op_name]
)
identifier[input_nodes] [ literal[int] ]= identifier[op_name]
identifier[fcnode] . identifier[append] ( identifier[flatten_node] )
keyword[if] identifier[no_bias] :
identifier[data_type] = identifier[onnx] . identifier[mapping] . identifier[NP_TYPE_TO_TENSOR_TYPE] [ identifier[np] . identifier[dtype] ( literal[string] )]
identifier[bias_name] = literal[string] + identifier[str] ( identifier[kwargs] [ literal[string] ])
identifier[tensor_node] = identifier[onnx] . identifier[helper] . identifier[make_tensor_value_info] ( identifier[bias_name] , identifier[data_type] ,( literal[int] ,))
identifier[initializer] . identifier[append] (
identifier[onnx] . identifier[helper] . identifier[make_tensor] (
identifier[name] = identifier[bias_name] ,
identifier[data_type] = identifier[data_type] ,
identifier[dims] =( literal[int] ,),
identifier[vals] =[ literal[int] ],
identifier[raw] = keyword[False] ,
)
)
identifier[input_nodes] . identifier[append] ( identifier[bias_name] )
identifier[fcnode] . identifier[append] ( identifier[tensor_node] )
identifier[node] = identifier[onnx] . identifier[helper] . identifier[make_node] (
literal[string] ,
identifier[input_nodes] ,
[ identifier[name] ],
identifier[alpha] = literal[int] ,
identifier[beta] = literal[int] ,
identifier[transA] = keyword[False] ,
identifier[transB] = keyword[True] ,
identifier[name] = identifier[name]
)
identifier[fcnode] . identifier[append] ( identifier[node] )
keyword[return] identifier[fcnode]
|
def convert_fully_connected(node, **kwargs):
"""Map MXNet's FullyConnected operator attributes to onnx's Gemm operator
    and return the list of created nodes.
"""
(name, input_nodes, attrs) = get_inputs(node, kwargs)
initializer = kwargs['initializer']
no_bias = get_boolean_attribute_value(attrs, 'no_bias')
fcnode = []
op_name = 'flatten_' + str(kwargs['idx'])
flatten_node = onnx.helper.make_node('Flatten', inputs=[input_nodes[0]], outputs=[op_name], name=op_name)
input_nodes[0] = op_name
fcnode.append(flatten_node)
if no_bias:
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]
bias_name = 'bias' + str(kwargs['idx'])
tensor_node = onnx.helper.make_tensor_value_info(bias_name, data_type, (1,))
initializer.append(onnx.helper.make_tensor(name=bias_name, data_type=data_type, dims=(1,), vals=[0], raw=False))
input_nodes.append(bias_name)
fcnode.append(tensor_node) # depends on [control=['if'], data=[]] # input (A, B, C) - C can be in place
# output
node = onnx.helper.make_node('Gemm', input_nodes, [name], alpha=1.0, beta=1.0, transA=False, transB=True, name=name)
fcnode.append(node)
return fcnode
|
def add_firewalld_port(port, permanent=True):
""" adds a firewall rule """
yum_install(packages=['firewalld'])
log_green('adding a new fw rule: %s' % port)
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True, capture=True):
p = ''
if permanent:
p = '--permanent'
sudo('firewall-cmd --add-port %s %s' % (port, p))
sudo('systemctl restart firewalld')
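Hypothetical calls for illustration (the port argument follows firewall-cmd's port/protocol form):

    add_firewalld_port('8080/tcp')                      # written to the permanent config
    add_firewalld_port('514/udp', permanent=False)      # runtime-only rule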
|
def function[add_firewalld_port, parameter[port, permanent]]:
constant[ adds a firewall rule ]
call[name[yum_install], parameter[]]
call[name[log_green], parameter[binary_operation[constant[adding a new fw rule: %s] <ast.Mod object at 0x7da2590d6920> name[port]]]]
with call[name[settings], parameter[call[name[hide], parameter[constant[warnings], constant[running], constant[stdout], constant[stderr]]]]] begin[:]
variable[p] assign[=] constant[]
if name[permanent] begin[:]
variable[p] assign[=] constant[--permanent]
call[name[sudo], parameter[binary_operation[constant[firewall-cmd --add-port %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b11a17b0>, <ast.Name object at 0x7da1b11a0af0>]]]]]
call[name[sudo], parameter[constant[systemctl restart firewalld]]]
|
keyword[def] identifier[add_firewalld_port] ( identifier[port] , identifier[permanent] = keyword[True] ):
literal[string]
identifier[yum_install] ( identifier[packages] =[ literal[string] ])
identifier[log_green] ( literal[string] % identifier[port] )
keyword[with] identifier[settings] ( identifier[hide] ( literal[string] , literal[string] , literal[string] , literal[string] ),
identifier[warn_only] = keyword[True] , identifier[capture] = keyword[True] ):
identifier[p] = literal[string]
keyword[if] identifier[permanent] :
identifier[p] = literal[string]
identifier[sudo] ( literal[string] %( identifier[port] , identifier[p] ))
identifier[sudo] ( literal[string] )
|
def add_firewalld_port(port, permanent=True):
""" adds a firewall rule """
yum_install(packages=['firewalld'])
log_green('adding a new fw rule: %s' % port)
with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=True, capture=True):
p = ''
if permanent:
p = '--permanent' # depends on [control=['if'], data=[]]
sudo('firewall-cmd --add-port %s %s' % (port, p))
sudo('systemctl restart firewalld') # depends on [control=['with'], data=[]]
|
def sample(self, features):
"""Run the model and extract samples.
Args:
      features: a map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
"""
logits, losses = self(features) # pylint: disable=not-callable
if self._target_modality_is_real:
return logits, logits, losses # Raw numbers returned from real modality.
if self.hparams.sampling_method == "argmax":
samples = tf.argmax(logits, axis=-1)
else:
assert self.hparams.sampling_method == "random"
def multinomial_squeeze(logits, temperature=1.0):
logits_shape = common_layers.shape_list(logits)
reshaped_logits = (
tf.reshape(logits, [-1, logits_shape[-1]]) / temperature)
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices, logits_shape[:-1])
return choices
samples = multinomial_squeeze(logits, self.hparams.sampling_temp)
return samples, logits, losses
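The random branch is ordinary temperature sampling over the last axis. A small NumPy sketch of the same idea, for illustration only (the model itself uses tf.multinomial as shown above):

    import numpy as np

    def sample_with_temperature(logits, temperature=1.0, seed=0):
        rng = np.random.default_rng(seed)
        shape = logits.shape
        flat = logits.reshape(-1, shape[-1]) / temperature   # the reshape/divide step above
        probs = np.exp(flat - flat.max(axis=-1, keepdims=True))
        probs /= probs.sum(axis=-1, keepdims=True)
        choices = np.array([rng.choice(shape[-1], p=p) for p in probs])
        return choices.reshape(shape[:-1])                   # drop the vocabulary axis

    logits = np.array([[[2.0, 0.5, 0.1], [0.1, 0.2, 3.0]]])  # shape (1, 2, 3)
    print(sample_with_temperature(logits, temperature=0.7))  # shape (1, 2)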
|
def function[sample, parameter[self, features]]:
constant[Run the model and extract samples.
Args:
features: an map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
]
<ast.Tuple object at 0x7da1b1e10e20> assign[=] call[name[self], parameter[name[features]]]
if name[self]._target_modality_is_real begin[:]
return[tuple[[<ast.Name object at 0x7da1b1e12770>, <ast.Name object at 0x7da1b1e13370>, <ast.Name object at 0x7da1b1e10130>]]]
if compare[name[self].hparams.sampling_method equal[==] constant[argmax]] begin[:]
variable[samples] assign[=] call[name[tf].argmax, parameter[name[logits]]]
return[tuple[[<ast.Name object at 0x7da1b2059150>, <ast.Name object at 0x7da1b2058910>, <ast.Name object at 0x7da1b2059630>]]]
|
keyword[def] identifier[sample] ( identifier[self] , identifier[features] ):
literal[string]
identifier[logits] , identifier[losses] = identifier[self] ( identifier[features] )
keyword[if] identifier[self] . identifier[_target_modality_is_real] :
keyword[return] identifier[logits] , identifier[logits] , identifier[losses]
keyword[if] identifier[self] . identifier[hparams] . identifier[sampling_method] == literal[string] :
identifier[samples] = identifier[tf] . identifier[argmax] ( identifier[logits] , identifier[axis] =- literal[int] )
keyword[else] :
keyword[assert] identifier[self] . identifier[hparams] . identifier[sampling_method] == literal[string]
keyword[def] identifier[multinomial_squeeze] ( identifier[logits] , identifier[temperature] = literal[int] ):
identifier[logits_shape] = identifier[common_layers] . identifier[shape_list] ( identifier[logits] )
identifier[reshaped_logits] =(
identifier[tf] . identifier[reshape] ( identifier[logits] ,[- literal[int] , identifier[logits_shape] [- literal[int] ]])/ identifier[temperature] )
identifier[choices] = identifier[tf] . identifier[multinomial] ( identifier[reshaped_logits] , literal[int] )
identifier[choices] = identifier[tf] . identifier[reshape] ( identifier[choices] , identifier[logits_shape] [:- literal[int] ])
keyword[return] identifier[choices]
identifier[samples] = identifier[multinomial_squeeze] ( identifier[logits] , identifier[self] . identifier[hparams] . identifier[sampling_temp] )
keyword[return] identifier[samples] , identifier[logits] , identifier[losses]
|
def sample(self, features):
"""Run the model and extract samples.
Args:
      features: a map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
"""
(logits, losses) = self(features) # pylint: disable=not-callable
if self._target_modality_is_real:
return (logits, logits, losses) # Raw numbers returned from real modality. # depends on [control=['if'], data=[]]
if self.hparams.sampling_method == 'argmax':
samples = tf.argmax(logits, axis=-1) # depends on [control=['if'], data=[]]
else:
assert self.hparams.sampling_method == 'random'
def multinomial_squeeze(logits, temperature=1.0):
logits_shape = common_layers.shape_list(logits)
reshaped_logits = tf.reshape(logits, [-1, logits_shape[-1]]) / temperature
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices, logits_shape[:-1])
return choices
samples = multinomial_squeeze(logits, self.hparams.sampling_temp)
return (samples, logits, losses)
|
def _build(self, *args):
"""Connects the Sequential module into the graph.
Args:
*args: A tuple of inputs, to be unpacked as the arguments to the first
layer.
Returns:
The output value of the last layer.
"""
net = args
if not self._layers:
# If the sequential is passed a single arg, this will end up being
# wrapped in an extra layer of tuple by *args. Normally we internally
# handle this in the loop below, but if there are no layers we unpack here
# in order to make Sequential([]) act like an identity, which seems right.
if len(args) == 1:
return args[0]
else:
return args
for layer in self._layers:
if isinstance(net, tuple):
net = layer(*net)
else:
net = layer(net)
return net
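The threading behaviour, feeding each layer the previous output and unpacking tuples into positional arguments, can be sketched with plain callables (illustrative only, not the Sonnet module):

    def double(x):
        return x * 2

    def split(x):
        return x, x + 1            # a tuple, unpacked for the next layer

    def add(a, b):
        return a + b

    net = (3,)                     # mirrors the *args tuple
    for layer in [double, split, add]:
        net = layer(*net) if isinstance(net, tuple) else layer(net)
    print(net)                     # double(3)=6 -> split(6)=(6, 7) -> add(6, 7)=13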
|
def function[_build, parameter[self]]:
constant[Connects the Sequential module into the graph.
Args:
*args: A tuple of inputs, to be unpacked as the arguments to the first
layer.
Returns:
The output value of the last layer.
]
variable[net] assign[=] name[args]
if <ast.UnaryOp object at 0x7da1b1c1b880> begin[:]
if compare[call[name[len], parameter[name[args]]] equal[==] constant[1]] begin[:]
return[call[name[args]][constant[0]]]
for taget[name[layer]] in starred[name[self]._layers] begin[:]
if call[name[isinstance], parameter[name[net], name[tuple]]] begin[:]
variable[net] assign[=] call[name[layer], parameter[<ast.Starred object at 0x7da1b1cca7a0>]]
return[name[net]]
|
keyword[def] identifier[_build] ( identifier[self] ,* identifier[args] ):
literal[string]
identifier[net] = identifier[args]
keyword[if] keyword[not] identifier[self] . identifier[_layers] :
keyword[if] identifier[len] ( identifier[args] )== literal[int] :
keyword[return] identifier[args] [ literal[int] ]
keyword[else] :
keyword[return] identifier[args]
keyword[for] identifier[layer] keyword[in] identifier[self] . identifier[_layers] :
keyword[if] identifier[isinstance] ( identifier[net] , identifier[tuple] ):
identifier[net] = identifier[layer] (* identifier[net] )
keyword[else] :
identifier[net] = identifier[layer] ( identifier[net] )
keyword[return] identifier[net]
|
def _build(self, *args):
"""Connects the Sequential module into the graph.
Args:
*args: A tuple of inputs, to be unpacked as the arguments to the first
layer.
Returns:
The output value of the last layer.
"""
net = args
if not self._layers:
# If the sequential is passed a single arg, this will end up being
# wrapped in an extra layer of tuple by *args. Normally we internally
# handle this in the loop below, but if there are no layers we unpack here
# in order to make Sequential([]) act like an identity, which seems right.
if len(args) == 1:
return args[0] # depends on [control=['if'], data=[]]
else:
return args # depends on [control=['if'], data=[]]
for layer in self._layers:
if isinstance(net, tuple):
net = layer(*net) # depends on [control=['if'], data=[]]
else:
net = layer(net) # depends on [control=['for'], data=['layer']]
return net
|
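The Sequential module in the previous row simply threads each layer's output into the next layer, unpacking tuples so multi-output layers can feed multi-input layers, and acting as an identity when it has no layers. The framework-free sketch below illustrates the same chaining idea with plain Python callables; the class and layer names are hypothetical, not the Sonnet API.

class SimpleSequential:
    """Chains callables: the output of each layer feeds the next."""

    def __init__(self, layers):
        self._layers = list(layers)

    def __call__(self, *args):
        net = args
        if not self._layers:
            # With no layers, behave as an identity on the (possibly single) input.
            return args[0] if len(args) == 1 else args
        for layer in self._layers:
            # Unpack tuples so multi-output layers can feed multi-input layers.
            net = layer(*net) if isinstance(net, tuple) else layer(net)
        return net

# Example: two small layers composed together.
double = lambda x: x * 2
add_one = lambda x: x + 1
print(SimpleSequential([double, add_one])(3))  # 7
print(SimpleSequential([])(3))                 # 3 (identity)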
def get_element_by_class_name_or_raise(self, class_name):
"""Return the SchemaElement for the specified class name, asserting that it exists."""
if class_name not in self._elements:
raise InvalidClassError(u'Class does not exist: {}'.format(class_name))
return self._elements[class_name]
|
def function[get_element_by_class_name_or_raise, parameter[self, class_name]]:
constant[Return the SchemaElement for the specified class name, asserting that it exists.]
if compare[name[class_name] <ast.NotIn object at 0x7da2590d7190> name[self]._elements] begin[:]
<ast.Raise object at 0x7da204567190>
return[call[name[self]._elements][name[class_name]]]
|
keyword[def] identifier[get_element_by_class_name_or_raise] ( identifier[self] , identifier[class_name] ):
literal[string]
keyword[if] identifier[class_name] keyword[not] keyword[in] identifier[self] . identifier[_elements] :
keyword[raise] identifier[InvalidClassError] ( literal[string] . identifier[format] ( identifier[class_name] ))
keyword[return] identifier[self] . identifier[_elements] [ identifier[class_name] ]
|
def get_element_by_class_name_or_raise(self, class_name):
"""Return the SchemaElement for the specified class name, asserting that it exists."""
if class_name not in self._elements:
raise InvalidClassError(u'Class does not exist: {}'.format(class_name)) # depends on [control=['if'], data=['class_name']]
return self._elements[class_name]
|
def record_migration(plugin, filename, script, **kwargs):
'''Only record a migration without applying it'''
db = get_db()
db.eval(RECORD_WRAPPER, plugin, filename, script)
return True
|
def function[record_migration, parameter[plugin, filename, script]]:
constant[Only record a migration without applying it]
variable[db] assign[=] call[name[get_db], parameter[]]
call[name[db].eval, parameter[name[RECORD_WRAPPER], name[plugin], name[filename], name[script]]]
return[constant[True]]
|
keyword[def] identifier[record_migration] ( identifier[plugin] , identifier[filename] , identifier[script] ,** identifier[kwargs] ):
literal[string]
identifier[db] = identifier[get_db] ()
identifier[db] . identifier[eval] ( identifier[RECORD_WRAPPER] , identifier[plugin] , identifier[filename] , identifier[script] )
keyword[return] keyword[True]
|
def record_migration(plugin, filename, script, **kwargs):
"""Only record a migration without applying it"""
db = get_db()
db.eval(RECORD_WRAPPER, plugin, filename, script)
return True
|
def set_paths_caching_params(self, timeout=None, cache_name=None):
"""Use the uWSGI caching subsystem to store mappings from URI to filesystem paths.
* http://uwsgi.readthedocs.io/en/latest/StaticFiles.html#caching-paths-mappings-resolutions
:param int timeout: Amount of seconds to put resolved paths in the uWSGI cache.
:param str|unicode cache_name: Cache name to use for static paths.
"""
self._set('static-cache-paths', timeout)
self._set('static-cache-paths-name', cache_name)
return self._section
|
def function[set_paths_caching_params, parameter[self, timeout, cache_name]]:
constant[Use the uWSGI caching subsystem to store mappings from URI to filesystem paths.
* http://uwsgi.readthedocs.io/en/latest/StaticFiles.html#caching-paths-mappings-resolutions
:param int timeout: Amount of seconds to put resolved paths in the uWSGI cache.
:param str|unicode cache_name: Cache name to use for static paths.
]
call[name[self]._set, parameter[constant[static-cache-paths], name[timeout]]]
call[name[self]._set, parameter[constant[static-cache-paths-name], name[cache_name]]]
return[name[self]._section]
|
keyword[def] identifier[set_paths_caching_params] ( identifier[self] , identifier[timeout] = keyword[None] , identifier[cache_name] = keyword[None] ):
literal[string]
identifier[self] . identifier[_set] ( literal[string] , identifier[timeout] )
identifier[self] . identifier[_set] ( literal[string] , identifier[cache_name] )
keyword[return] identifier[self] . identifier[_section]
|
def set_paths_caching_params(self, timeout=None, cache_name=None):
"""Use the uWSGI caching subsystem to store mappings from URI to filesystem paths.
* http://uwsgi.readthedocs.io/en/latest/StaticFiles.html#caching-paths-mappings-resolutions
:param int timeout: Amount of seconds to put resolved paths in the uWSGI cache.
:param str|unicode cache_name: Cache name to use for static paths.
"""
self._set('static-cache-paths', timeout)
self._set('static-cache-paths-name', cache_name)
return self._section
|
def activate_firmware_and_wait(self, rollback_override=None,
timeout=2, interval=1):
""" Activate the new uploaded firmware and wait for
long running command. """
try:
self.activate_firmware(rollback_override)
except CompletionCodeError as e:
if e.cc == CC_LONG_DURATION_CMD_IN_PROGRESS:
self.wait_for_long_duration_command(
constants.CMDID_HPM_ACTIVATE_FIRMWARE,
timeout, interval)
else:
raise HpmError('activate_firmware CC=0x%02x' % e.cc)
except IpmiTimeoutError:
# controller is in reset and flashed new firmware
pass
|
def function[activate_firmware_and_wait, parameter[self, rollback_override, timeout, interval]]:
constant[ Activate the new uploaded firmware and wait for
long running command. ]
<ast.Try object at 0x7da1b077a800>
|
keyword[def] identifier[activate_firmware_and_wait] ( identifier[self] , identifier[rollback_override] = keyword[None] ,
identifier[timeout] = literal[int] , identifier[interval] = literal[int] ):
literal[string]
keyword[try] :
identifier[self] . identifier[activate_firmware] ( identifier[rollback_override] )
keyword[except] identifier[CompletionCodeError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[cc] == identifier[CC_LONG_DURATION_CMD_IN_PROGRESS] :
identifier[self] . identifier[wait_for_long_duration_command] (
identifier[constants] . identifier[CMDID_HPM_ACTIVATE_FIRMWARE] ,
identifier[timeout] , identifier[interval] )
keyword[else] :
keyword[raise] identifier[HpmError] ( literal[string] % identifier[e] . identifier[cc] )
keyword[except] identifier[IpmiTimeoutError] :
keyword[pass]
|
def activate_firmware_and_wait(self, rollback_override=None, timeout=2, interval=1):
""" Activate the new uploaded firmware and wait for
long running command. """
try:
self.activate_firmware(rollback_override) # depends on [control=['try'], data=[]]
except CompletionCodeError as e:
if e.cc == CC_LONG_DURATION_CMD_IN_PROGRESS:
self.wait_for_long_duration_command(constants.CMDID_HPM_ACTIVATE_FIRMWARE, timeout, interval) # depends on [control=['if'], data=[]]
else:
raise HpmError('activate_firmware CC=0x%02x' % e.cc) # depends on [control=['except'], data=['e']]
except IpmiTimeoutError:
# controller is in reset and flashed new firmware
pass # depends on [control=['except'], data=[]]
|
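The firmware-activation row above follows a common device-management pattern: issue the command, and if the controller answers with a "long duration command in progress" completion code, poll until it finishes; a transport timeout is taken to mean the controller is already resetting into the new image. The sketch below captures that control flow in a library-agnostic way; the exception names and helpers are hypothetical stand-ins, not the real pyipmi API.

import time

class CommandInProgress(Exception):
    """Hypothetical stand-in for a 'long duration command in progress' completion code."""

class DeviceTimeout(Exception):
    """Hypothetical stand-in for a transport timeout while the device resets."""

def run_and_wait(start, poll, timeout=2.0, interval=0.5):
    # Kick off the command; if the device reports it is still working, poll
    # until it finishes or the deadline passes. A timeout during start is
    # treated as the device rebooting into the new firmware.
    try:
        start()
    except CommandInProgress:
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if poll():
                return True
            time.sleep(interval)
        raise RuntimeError('command did not finish in time')
    except DeviceTimeout:
        pass
    return True

# Example: start() immediately reports "in progress"; poll() succeeds on the second check.
state = {'calls': 0}
def start():
    raise CommandInProgress()
def poll():
    state['calls'] += 1
    return state['calls'] >= 2
print(run_and_wait(start, poll, timeout=2.0, interval=0.1))  # True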
def search(self, query, *, max_results=100, **kwargs):
"""Search Google Music and library for content.
Parameters:
query (str): Search text.
max_results (int, Optional): Maximum number of results per type per
location to retrieve. I.e up to 100 Google and 100 library
for a total of 200 for the default value.
Google only accepts values up to 100.
Default: ``100``
kwargs (bool, Optional): Any of ``albums``, ``artists``, ``genres``,
``playlists``, ``podcasts``, ``situations``, ``songs``, ``stations``,
``videos`` set to ``True`` will include that result type in the
returned dict.
Setting none of them will include all result types in the returned dict.
Returns:
dict: A dict of results separated into keys: ``'albums'``, ``'artists'``,
``'genres'``, ``'playlists'``, ```'podcasts'``, ``'situations'``,
``'songs'``, ``'stations'``, ``'videos'``.
Note:
Free account search is restricted so may not contain hits for all result types.
"""
results = defaultdict(list)
for type_, results_ in self.search_library(
query,
max_results=max_results,
**kwargs
).items():
results[type_].extend(results_)
for type_, results_ in self.search_google(
query,
max_results=max_results,
**kwargs
).items():
results[type_].extend(results_)
return dict(results)
|
def function[search, parameter[self, query]]:
constant[Search Google Music and library for content.
Parameters:
query (str): Search text.
max_results (int, Optional): Maximum number of results per type per
location to retrieve. I.e up to 100 Google and 100 library
for a total of 200 for the default value.
Google only accepts values up to 100.
Default: ``100``
kwargs (bool, Optional): Any of ``albums``, ``artists``, ``genres``,
``playlists``, ``podcasts``, ``situations``, ``songs``, ``stations``,
``videos`` set to ``True`` will include that result type in the
returned dict.
Setting none of them will include all result types in the returned dict.
Returns:
dict: A dict of results separated into keys: ``'albums'``, ``'artists'``,
``'genres'``, ``'playlists'``, ```'podcasts'``, ``'situations'``,
``'songs'``, ``'stations'``, ``'videos'``.
Note:
Free account search is restricted so may not contain hits for all result types.
]
variable[results] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[tuple[[<ast.Name object at 0x7da207f9aa10>, <ast.Name object at 0x7da207f9b6d0>]]] in starred[call[call[name[self].search_library, parameter[name[query]]].items, parameter[]]] begin[:]
call[call[name[results]][name[type_]].extend, parameter[name[results_]]]
for taget[tuple[[<ast.Name object at 0x7da18ede7730>, <ast.Name object at 0x7da18ede4c40>]]] in starred[call[call[name[self].search_google, parameter[name[query]]].items, parameter[]]] begin[:]
call[call[name[results]][name[type_]].extend, parameter[name[results_]]]
return[call[name[dict], parameter[name[results]]]]
|
keyword[def] identifier[search] ( identifier[self] , identifier[query] ,*, identifier[max_results] = literal[int] ,** identifier[kwargs] ):
literal[string]
identifier[results] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[type_] , identifier[results_] keyword[in] identifier[self] . identifier[search_library] (
identifier[query] ,
identifier[max_results] = identifier[max_results] ,
** identifier[kwargs]
). identifier[items] ():
identifier[results] [ identifier[type_] ]. identifier[extend] ( identifier[results_] )
keyword[for] identifier[type_] , identifier[results_] keyword[in] identifier[self] . identifier[search_google] (
identifier[query] ,
identifier[max_results] = identifier[max_results] ,
** identifier[kwargs]
). identifier[items] ():
identifier[results] [ identifier[type_] ]. identifier[extend] ( identifier[results_] )
keyword[return] identifier[dict] ( identifier[results] )
|
def search(self, query, *, max_results=100, **kwargs):
"""Search Google Music and library for content.
Parameters:
query (str): Search text.
max_results (int, Optional): Maximum number of results per type per
location to retrieve. I.e up to 100 Google and 100 library
for a total of 200 for the default value.
Google only accepts values up to 100.
Default: ``100``
kwargs (bool, Optional): Any of ``albums``, ``artists``, ``genres``,
``playlists``, ``podcasts``, ``situations``, ``songs``, ``stations``,
``videos`` set to ``True`` will include that result type in the
returned dict.
Setting none of them will include all result types in the returned dict.
Returns:
dict: A dict of results separated into keys: ``'albums'``, ``'artists'``,
``'genres'``, ``'playlists'``, ```'podcasts'``, ``'situations'``,
``'songs'``, ``'stations'``, ``'videos'``.
Note:
Free account search is restricted so may not contain hits for all result types.
"""
results = defaultdict(list)
for (type_, results_) in self.search_library(query, max_results=max_results, **kwargs).items():
results[type_].extend(results_) # depends on [control=['for'], data=[]]
for (type_, results_) in self.search_google(query, max_results=max_results, **kwargs).items():
results[type_].extend(results_) # depends on [control=['for'], data=[]]
return dict(results)
|
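The search method above merges per-type hit lists from the library search and the store search into a single dict keyed by result type. The short sketch below isolates just that merge step with plain dicts; the sample hits are made up for illustration.

from collections import defaultdict

def merge_results(*result_dicts):
    # Combine several {type: [hits]} dicts into one, preserving the order of hits.
    merged = defaultdict(list)
    for results in result_dicts:
        for type_, hits in results.items():
            merged[type_].extend(hits)
    return dict(merged)

library_hits = {'songs': ['local song'], 'albums': ['local album']}
google_hits = {'songs': ['store song'], 'stations': ['store station']}
print(merge_results(library_hits, google_hits))
# {'songs': ['local song', 'store song'], 'albums': ['local album'], 'stations': ['store station']}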
def get_urls_and_locations(self, urls):
"""Get URLs and their redirection addresses.
:param urls: a list of URL addresses
:returns: an instance of CachedIterable containing given URLs
and valid location header values of their responses
"""
location_generator = self.get_new_locations(urls)
initial_cache = list(set(urls))
return CachedIterable(location_generator, initial_cache)
|
def function[get_urls_and_locations, parameter[self, urls]]:
constant[Get URLs and their redirection addresses.
:param urls: a list of URL addresses
:returns: an instance of CachedIterable containing given URLs
and valid location header values of their responses
]
variable[location_generator] assign[=] call[name[self].get_new_locations, parameter[name[urls]]]
variable[initial_cache] assign[=] call[name[list], parameter[call[name[set], parameter[name[urls]]]]]
return[call[name[CachedIterable], parameter[name[location_generator], name[initial_cache]]]]
|
keyword[def] identifier[get_urls_and_locations] ( identifier[self] , identifier[urls] ):
literal[string]
identifier[location_generator] = identifier[self] . identifier[get_new_locations] ( identifier[urls] )
identifier[initial_cache] = identifier[list] ( identifier[set] ( identifier[urls] ))
keyword[return] identifier[CachedIterable] ( identifier[location_generator] , identifier[initial_cache] )
|
def get_urls_and_locations(self, urls):
"""Get URLs and their redirection addresses.
:param urls: a list of URL addresses
:returns: an instance of CachedIterable containing given URLs
and valid location header values of their responses
"""
location_generator = self.get_new_locations(urls)
initial_cache = list(set(urls))
return CachedIterable(location_generator, initial_cache)
|
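get_urls_and_locations seeds a CachedIterable with the de-duplicated input URLs and lets it pull redirect targets lazily from a generator. Since CachedIterable itself is not shown in this row, the sketch below is one plausible minimal reading of it, not the project's actual class: cached items are yielded first, then new items are pulled from the generator and cached as they arrive.

class CachedIterable:
    """Minimal sketch: yields cached items first, then lazily pulls more from a generator."""

    def __init__(self, generator, initial_cache=None):
        self._generator = generator
        self._cache = list(initial_cache or [])

    def __iter__(self):
        for item in self._cache:
            yield item
        for item in self._generator:
            self._cache.append(item)
            yield item

urls = ['http://a.example', 'http://b.example']
redirects = (u + '/moved' for u in urls)          # stand-in for get_new_locations()
for value in CachedIterable(redirects, list(set(urls))):
    print(value)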
def plotting_context(context=None, font_scale=1, rc=None):
"""Return a parameter dict to scale elements of the figure.
This affects things like the size of the labels, lines, and other
elements of the plot, but not the overall style. The base context
is "notebook", and the other contexts are "paper", "talk", and "poster",
    which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
respectively.
This function returns an object that can be used in a ``with`` statement
to temporarily change the context parameters.
Parameters
----------
context : dict, None, or one of {paper, notebook, talk, poster}
A dictionary of parameters or the name of a preconfigured set.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
context dictionaries. This only updates parameters that are
considered part of the context definition.
Examples
--------
>>> c = plotting_context("poster")
>>> c = plotting_context("notebook", font_scale=1.5)
>>> c = plotting_context("talk", rc={"lines.linewidth": 2})
>>> import matplotlib.pyplot as plt
>>> with plotting_context("paper"):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_context : set the matplotlib parameters to scale plot elements
axes_style : return a dict of parameters defining a figure style
color_palette : define the color palette for a plot
"""
if context is None:
context_dict = {k: mpl.rcParams[k] for k in _context_keys}
elif isinstance(context, dict):
context_dict = context
else:
contexts = ["paper", "notebook", "talk", "poster"]
if context not in contexts:
raise ValueError("context must be in %s" % ", ".join(contexts))
# Set up dictionary of default parameters
base_context = {
"figure.figsize": np.array([8, 5.5]),
"font.size": 12,
"axes.labelsize": 11,
"axes.titlesize": 12,
"xtick.labelsize": 10,
"ytick.labelsize": 10,
"legend.fontsize": 10,
"grid.linewidth": 1,
"lines.linewidth": 1.75,
"patch.linewidth": .3,
"lines.markersize": 7,
"lines.markeredgewidth": 0,
"xtick.major.width": 1,
"ytick.major.width": 1,
"xtick.minor.width": .5,
"ytick.minor.width": .5,
"xtick.major.pad": 7,
"ytick.major.pad": 7,
}
# Scale all the parameters by the same factor depending on the context
scaling = dict(paper=.8, notebook=1, talk=1.3, poster=1.6)[context]
context_dict = {k: v * scaling for k, v in base_context.items()}
# Now independently scale the fonts
font_keys = ["axes.labelsize", "axes.titlesize", "legend.fontsize",
"xtick.labelsize", "ytick.labelsize", "font.size"]
font_dict = {k: context_dict[k] * font_scale for k in font_keys}
context_dict.update(font_dict)
# Implement hack workaround for matplotlib bug
# See https://github.com/mwaskom/seaborn/issues/344
# There is a bug in matplotlib 1.4.2 that makes points invisible when
# they don't have an edgewidth. It will supposedly be fixed in 1.4.3.
if mpl.__version__ == "1.4.2":
context_dict["lines.markeredgewidth"] = 0.01
# Override these settings with the provided rc dictionary
if rc is not None:
rc = {k: v for k, v in rc.items() if k in _context_keys}
context_dict.update(rc)
# Wrap in a _PlottingContext object so this can be used in a with statement
context_object = _PlottingContext(context_dict)
return context_object
|
def function[plotting_context, parameter[context, font_scale, rc]]:
constant[Return a parameter dict to scale elements of the figure.
This affects things like the size of the labels, lines, and other
elements of the plot, but not the overall style. The base context
is "notebook", and the other contexts are "paper", "talk", and "poster",
    which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
respectively.
This function returns an object that can be used in a ``with`` statement
to temporarily change the context parameters.
Parameters
----------
context : dict, None, or one of {paper, notebook, talk, poster}
A dictionary of parameters or the name of a preconfigured set.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
context dictionaries. This only updates parameters that are
considered part of the context definition.
Examples
--------
>>> c = plotting_context("poster")
>>> c = plotting_context("notebook", font_scale=1.5)
>>> c = plotting_context("talk", rc={"lines.linewidth": 2})
>>> import matplotlib.pyplot as plt
>>> with plotting_context("paper"):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_context : set the matplotlib parameters to scale plot elements
axes_style : return a dict of parameters defining a figure style
color_palette : define the color palette for a plot
]
if compare[name[context] is constant[None]] begin[:]
variable[context_dict] assign[=] <ast.DictComp object at 0x7da18dc9a3b0>
if compare[name[mpl].__version__ equal[==] constant[1.4.2]] begin[:]
call[name[context_dict]][constant[lines.markeredgewidth]] assign[=] constant[0.01]
if compare[name[rc] is_not constant[None]] begin[:]
variable[rc] assign[=] <ast.DictComp object at 0x7da2044c2110>
call[name[context_dict].update, parameter[name[rc]]]
variable[context_object] assign[=] call[name[_PlottingContext], parameter[name[context_dict]]]
return[name[context_object]]
|
keyword[def] identifier[plotting_context] ( identifier[context] = keyword[None] , identifier[font_scale] = literal[int] , identifier[rc] = keyword[None] ):
literal[string]
keyword[if] identifier[context] keyword[is] keyword[None] :
identifier[context_dict] ={ identifier[k] : identifier[mpl] . identifier[rcParams] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[_context_keys] }
keyword[elif] identifier[isinstance] ( identifier[context] , identifier[dict] ):
identifier[context_dict] = identifier[context]
keyword[else] :
identifier[contexts] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[context] keyword[not] keyword[in] identifier[contexts] :
keyword[raise] identifier[ValueError] ( literal[string] % literal[string] . identifier[join] ( identifier[contexts] ))
identifier[base_context] ={
literal[string] : identifier[np] . identifier[array] ([ literal[int] , literal[int] ]),
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
}
identifier[scaling] = identifier[dict] ( identifier[paper] = literal[int] , identifier[notebook] = literal[int] , identifier[talk] = literal[int] , identifier[poster] = literal[int] )[ identifier[context] ]
identifier[context_dict] ={ identifier[k] : identifier[v] * identifier[scaling] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[base_context] . identifier[items] ()}
identifier[font_keys] =[ literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ]
identifier[font_dict] ={ identifier[k] : identifier[context_dict] [ identifier[k] ]* identifier[font_scale] keyword[for] identifier[k] keyword[in] identifier[font_keys] }
identifier[context_dict] . identifier[update] ( identifier[font_dict] )
keyword[if] identifier[mpl] . identifier[__version__] == literal[string] :
identifier[context_dict] [ literal[string] ]= literal[int]
keyword[if] identifier[rc] keyword[is] keyword[not] keyword[None] :
identifier[rc] ={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[rc] . identifier[items] () keyword[if] identifier[k] keyword[in] identifier[_context_keys] }
identifier[context_dict] . identifier[update] ( identifier[rc] )
identifier[context_object] = identifier[_PlottingContext] ( identifier[context_dict] )
keyword[return] identifier[context_object]
|
def plotting_context(context=None, font_scale=1, rc=None):
"""Return a parameter dict to scale elements of the figure.
This affects things like the size of the labels, lines, and other
elements of the plot, but not the overall style. The base context
is "notebook", and the other contexts are "paper", "talk", and "poster",
    which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
respectively.
This function returns an object that can be used in a ``with`` statement
to temporarily change the context parameters.
Parameters
----------
context : dict, None, or one of {paper, notebook, talk, poster}
A dictionary of parameters or the name of a preconfigured set.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
context dictionaries. This only updates parameters that are
considered part of the context definition.
Examples
--------
>>> c = plotting_context("poster")
>>> c = plotting_context("notebook", font_scale=1.5)
>>> c = plotting_context("talk", rc={"lines.linewidth": 2})
>>> import matplotlib.pyplot as plt
>>> with plotting_context("paper"):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_context : set the matplotlib parameters to scale plot elements
axes_style : return a dict of parameters defining a figure style
color_palette : define the color palette for a plot
"""
if context is None:
context_dict = {k: mpl.rcParams[k] for k in _context_keys} # depends on [control=['if'], data=[]]
elif isinstance(context, dict):
context_dict = context # depends on [control=['if'], data=[]]
else:
contexts = ['paper', 'notebook', 'talk', 'poster']
if context not in contexts:
raise ValueError('context must be in %s' % ', '.join(contexts)) # depends on [control=['if'], data=['contexts']]
# Set up dictionary of default parameters
base_context = {'figure.figsize': np.array([8, 5.5]), 'font.size': 12, 'axes.labelsize': 11, 'axes.titlesize': 12, 'xtick.labelsize': 10, 'ytick.labelsize': 10, 'legend.fontsize': 10, 'grid.linewidth': 1, 'lines.linewidth': 1.75, 'patch.linewidth': 0.3, 'lines.markersize': 7, 'lines.markeredgewidth': 0, 'xtick.major.width': 1, 'ytick.major.width': 1, 'xtick.minor.width': 0.5, 'ytick.minor.width': 0.5, 'xtick.major.pad': 7, 'ytick.major.pad': 7}
# Scale all the parameters by the same factor depending on the context
scaling = dict(paper=0.8, notebook=1, talk=1.3, poster=1.6)[context]
context_dict = {k: v * scaling for (k, v) in base_context.items()}
# Now independently scale the fonts
font_keys = ['axes.labelsize', 'axes.titlesize', 'legend.fontsize', 'xtick.labelsize', 'ytick.labelsize', 'font.size']
font_dict = {k: context_dict[k] * font_scale for k in font_keys}
context_dict.update(font_dict)
# Implement hack workaround for matplotlib bug
# See https://github.com/mwaskom/seaborn/issues/344
# There is a bug in matplotlib 1.4.2 that makes points invisible when
# they don't have an edgewidth. It will supposedly be fixed in 1.4.3.
if mpl.__version__ == '1.4.2':
context_dict['lines.markeredgewidth'] = 0.01 # depends on [control=['if'], data=[]]
# Override these settings with the provided rc dictionary
if rc is not None:
rc = {k: v for (k, v) in rc.items() if k in _context_keys}
context_dict.update(rc) # depends on [control=['if'], data=['rc']]
# Wrap in a _PlottingContext object so this can be used in a with statement
context_object = _PlottingContext(context_dict)
return context_object
|
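The core of plotting_context is two multiplications: every parameter in the base dictionary is scaled by the chosen context factor, and font-related keys are then scaled again by font_scale. The toy snippet below isolates that arithmetic with a trimmed-down parameter set; the numbers mirror the defaults above, but the helper name is illustrative rather than part of seaborn.

# Standalone illustration of the context/font scaling arithmetic.
base = {'font.size': 12, 'lines.linewidth': 1.75, 'axes.labelsize': 11}
font_keys = {'font.size', 'axes.labelsize'}

def scale_context(base, context='notebook', font_scale=1.0):
    scaling = {'paper': 0.8, 'notebook': 1.0, 'talk': 1.3, 'poster': 1.6}[context]
    scaled = {k: v * scaling for k, v in base.items()}
    # Fonts get an extra, independent scaling factor.
    return {k: (v * font_scale if k in font_keys else v) for k, v in scaled.items()}

print(scale_context(base, 'poster'))                    # every value * 1.6
print(scale_context(base, 'notebook', font_scale=1.5))  # only font keys * 1.5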
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
'''
Starting with one package, check all packages for dependencies
'''
if pkg_file and not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
self.repo_metadata = self._get_repo_metadata()
if not formula_def:
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
if pkg_name in self.repo_metadata[repo]['packages']:
formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
if not formula_def:
raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
# Check to see if the package is already installed
pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
pkgs_to_install = []
if pkg_info is None or self.opts['force']:
pkgs_to_install.append(pkg_name)
elif pkg_info is not None and not self.opts['force']:
raise SPMPackageError(
'Package {0} already installed, not installing again'.format(formula_def['name'])
)
optional_install = []
recommended_install = []
if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
self.avail_pkgs = {}
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
for pkg in self.repo_metadata[repo]['packages']:
self.avail_pkgs[pkg] = repo
needs, unavail, optional, recommended = self._resolve_deps(formula_def)
if unavail:
raise SPMPackageError(
'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
formula_def['name'], '\n'.join(unavail))
)
if optional:
optional_install.extend(optional)
for dep_pkg in optional:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
optional_install.append(msg)
if recommended:
recommended_install.extend(recommended)
for dep_pkg in recommended:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
recommended_install.append(msg)
if needs:
pkgs_to_install.extend(needs)
for dep_pkg in needs:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
return pkgs_to_install, optional_install, recommended_install
|
def function[_check_all_deps, parameter[self, pkg_name, pkg_file, formula_def]]:
constant[
Starting with one package, check all packages for dependencies
]
if <ast.BoolOp object at 0x7da20c6a8370> begin[:]
<ast.Raise object at 0x7da20c6a8eb0>
name[self].repo_metadata assign[=] call[name[self]._get_repo_metadata, parameter[]]
if <ast.UnaryOp object at 0x7da20c6a9ff0> begin[:]
for taget[name[repo]] in starred[name[self].repo_metadata] begin[:]
if <ast.UnaryOp object at 0x7da20c6a8130> begin[:]
continue
if compare[name[pkg_name] in call[call[name[self].repo_metadata][name[repo]]][constant[packages]]] begin[:]
variable[formula_def] assign[=] call[call[call[call[name[self].repo_metadata][name[repo]]][constant[packages]]][name[pkg_name]]][constant[info]]
if <ast.UnaryOp object at 0x7da20c6a8730> begin[:]
<ast.Raise object at 0x7da20c6ab6a0>
variable[pkg_info] assign[=] call[name[self]._pkgdb_fun, parameter[constant[info], name[pkg_name], name[self].db_conn]]
variable[pkgs_to_install] assign[=] list[[]]
if <ast.BoolOp object at 0x7da20c6ab1f0> begin[:]
call[name[pkgs_to_install].append, parameter[name[pkg_name]]]
variable[optional_install] assign[=] list[[]]
variable[recommended_install] assign[=] list[[]]
if <ast.BoolOp object at 0x7da20c6a8ac0> begin[:]
name[self].avail_pkgs assign[=] dictionary[[], []]
for taget[name[repo]] in starred[name[self].repo_metadata] begin[:]
if <ast.UnaryOp object at 0x7da20c6a9930> begin[:]
continue
for taget[name[pkg]] in starred[call[call[name[self].repo_metadata][name[repo]]][constant[packages]]] begin[:]
call[name[self].avail_pkgs][name[pkg]] assign[=] name[repo]
<ast.Tuple object at 0x7da20c6aaad0> assign[=] call[name[self]._resolve_deps, parameter[name[formula_def]]]
if name[unavail] begin[:]
<ast.Raise object at 0x7da20c6a8700>
if name[optional] begin[:]
call[name[optional_install].extend, parameter[name[optional]]]
for taget[name[dep_pkg]] in starred[name[optional]] begin[:]
variable[pkg_info] assign[=] call[name[self]._pkgdb_fun, parameter[constant[info], call[name[formula_def]][constant[name]]]]
variable[msg] assign[=] name[dep_pkg]
if call[name[isinstance], parameter[name[pkg_info], name[dict]]] begin[:]
variable[msg] assign[=] call[constant[{0} [Installed]].format, parameter[name[dep_pkg]]]
call[name[optional_install].append, parameter[name[msg]]]
if name[recommended] begin[:]
call[name[recommended_install].extend, parameter[name[recommended]]]
for taget[name[dep_pkg]] in starred[name[recommended]] begin[:]
variable[pkg_info] assign[=] call[name[self]._pkgdb_fun, parameter[constant[info], call[name[formula_def]][constant[name]]]]
variable[msg] assign[=] name[dep_pkg]
if call[name[isinstance], parameter[name[pkg_info], name[dict]]] begin[:]
variable[msg] assign[=] call[constant[{0} [Installed]].format, parameter[name[dep_pkg]]]
call[name[recommended_install].append, parameter[name[msg]]]
if name[needs] begin[:]
call[name[pkgs_to_install].extend, parameter[name[needs]]]
for taget[name[dep_pkg]] in starred[name[needs]] begin[:]
variable[pkg_info] assign[=] call[name[self]._pkgdb_fun, parameter[constant[info], call[name[formula_def]][constant[name]]]]
variable[msg] assign[=] name[dep_pkg]
if call[name[isinstance], parameter[name[pkg_info], name[dict]]] begin[:]
variable[msg] assign[=] call[constant[{0} [Installed]].format, parameter[name[dep_pkg]]]
return[tuple[[<ast.Name object at 0x7da20c6aaa10>, <ast.Name object at 0x7da20c6aac20>, <ast.Name object at 0x7da20c6a88b0>]]]
|
keyword[def] identifier[_check_all_deps] ( identifier[self] , identifier[pkg_name] = keyword[None] , identifier[pkg_file] = keyword[None] , identifier[formula_def] = keyword[None] ):
literal[string]
keyword[if] identifier[pkg_file] keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[pkg_file] ):
keyword[raise] identifier[SPMInvocationError] ( literal[string] . identifier[format] ( identifier[pkg_file] ))
identifier[self] . identifier[repo_metadata] = identifier[self] . identifier[_get_repo_metadata] ()
keyword[if] keyword[not] identifier[formula_def] :
keyword[for] identifier[repo] keyword[in] identifier[self] . identifier[repo_metadata] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[repo_metadata] [ identifier[repo] ][ literal[string] ], identifier[dict] ):
keyword[continue]
keyword[if] identifier[pkg_name] keyword[in] identifier[self] . identifier[repo_metadata] [ identifier[repo] ][ literal[string] ]:
identifier[formula_def] = identifier[self] . identifier[repo_metadata] [ identifier[repo] ][ literal[string] ][ identifier[pkg_name] ][ literal[string] ]
keyword[if] keyword[not] identifier[formula_def] :
keyword[raise] identifier[SPMInvocationError] ( literal[string] . identifier[format] ( identifier[pkg_name] ))
identifier[pkg_info] = identifier[self] . identifier[_pkgdb_fun] ( literal[string] , identifier[pkg_name] , identifier[self] . identifier[db_conn] )
identifier[pkgs_to_install] =[]
keyword[if] identifier[pkg_info] keyword[is] keyword[None] keyword[or] identifier[self] . identifier[opts] [ literal[string] ]:
identifier[pkgs_to_install] . identifier[append] ( identifier[pkg_name] )
keyword[elif] identifier[pkg_info] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[self] . identifier[opts] [ literal[string] ]:
keyword[raise] identifier[SPMPackageError] (
literal[string] . identifier[format] ( identifier[formula_def] [ literal[string] ])
)
identifier[optional_install] =[]
identifier[recommended_install] =[]
keyword[if] literal[string] keyword[in] identifier[formula_def] keyword[or] literal[string] keyword[in] identifier[formula_def] keyword[or] literal[string] keyword[in] identifier[formula_def] :
identifier[self] . identifier[avail_pkgs] ={}
keyword[for] identifier[repo] keyword[in] identifier[self] . identifier[repo_metadata] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[repo_metadata] [ identifier[repo] ][ literal[string] ], identifier[dict] ):
keyword[continue]
keyword[for] identifier[pkg] keyword[in] identifier[self] . identifier[repo_metadata] [ identifier[repo] ][ literal[string] ]:
identifier[self] . identifier[avail_pkgs] [ identifier[pkg] ]= identifier[repo]
identifier[needs] , identifier[unavail] , identifier[optional] , identifier[recommended] = identifier[self] . identifier[_resolve_deps] ( identifier[formula_def] )
keyword[if] identifier[unavail] :
keyword[raise] identifier[SPMPackageError] (
literal[string] . identifier[format] (
identifier[formula_def] [ literal[string] ], literal[string] . identifier[join] ( identifier[unavail] ))
)
keyword[if] identifier[optional] :
identifier[optional_install] . identifier[extend] ( identifier[optional] )
keyword[for] identifier[dep_pkg] keyword[in] identifier[optional] :
identifier[pkg_info] = identifier[self] . identifier[_pkgdb_fun] ( literal[string] , identifier[formula_def] [ literal[string] ])
identifier[msg] = identifier[dep_pkg]
keyword[if] identifier[isinstance] ( identifier[pkg_info] , identifier[dict] ):
identifier[msg] = literal[string] . identifier[format] ( identifier[dep_pkg] )
identifier[optional_install] . identifier[append] ( identifier[msg] )
keyword[if] identifier[recommended] :
identifier[recommended_install] . identifier[extend] ( identifier[recommended] )
keyword[for] identifier[dep_pkg] keyword[in] identifier[recommended] :
identifier[pkg_info] = identifier[self] . identifier[_pkgdb_fun] ( literal[string] , identifier[formula_def] [ literal[string] ])
identifier[msg] = identifier[dep_pkg]
keyword[if] identifier[isinstance] ( identifier[pkg_info] , identifier[dict] ):
identifier[msg] = literal[string] . identifier[format] ( identifier[dep_pkg] )
identifier[recommended_install] . identifier[append] ( identifier[msg] )
keyword[if] identifier[needs] :
identifier[pkgs_to_install] . identifier[extend] ( identifier[needs] )
keyword[for] identifier[dep_pkg] keyword[in] identifier[needs] :
identifier[pkg_info] = identifier[self] . identifier[_pkgdb_fun] ( literal[string] , identifier[formula_def] [ literal[string] ])
identifier[msg] = identifier[dep_pkg]
keyword[if] identifier[isinstance] ( identifier[pkg_info] , identifier[dict] ):
identifier[msg] = literal[string] . identifier[format] ( identifier[dep_pkg] )
keyword[return] identifier[pkgs_to_install] , identifier[optional_install] , identifier[recommended_install]
|
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
"""
Starting with one package, check all packages for dependencies
"""
if pkg_file and (not os.path.exists(pkg_file)):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file)) # depends on [control=['if'], data=[]]
self.repo_metadata = self._get_repo_metadata()
if not formula_def:
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue # depends on [control=['if'], data=[]]
if pkg_name in self.repo_metadata[repo]['packages']:
formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info'] # depends on [control=['if'], data=['pkg_name']] # depends on [control=['for'], data=['repo']] # depends on [control=['if'], data=[]]
if not formula_def:
raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name)) # depends on [control=['if'], data=[]]
# Check to see if the package is already installed
pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
pkgs_to_install = []
if pkg_info is None or self.opts['force']:
pkgs_to_install.append(pkg_name) # depends on [control=['if'], data=[]]
elif pkg_info is not None and (not self.opts['force']):
raise SPMPackageError('Package {0} already installed, not installing again'.format(formula_def['name'])) # depends on [control=['if'], data=[]]
optional_install = []
recommended_install = []
if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
self.avail_pkgs = {}
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue # depends on [control=['if'], data=[]]
for pkg in self.repo_metadata[repo]['packages']:
self.avail_pkgs[pkg] = repo # depends on [control=['for'], data=['pkg']] # depends on [control=['for'], data=['repo']]
(needs, unavail, optional, recommended) = self._resolve_deps(formula_def)
if unavail:
raise SPMPackageError('Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(formula_def['name'], '\n'.join(unavail))) # depends on [control=['if'], data=[]]
if optional:
optional_install.extend(optional)
for dep_pkg in optional:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg) # depends on [control=['if'], data=[]]
optional_install.append(msg) # depends on [control=['for'], data=['dep_pkg']] # depends on [control=['if'], data=[]]
if recommended:
recommended_install.extend(recommended)
for dep_pkg in recommended:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg) # depends on [control=['if'], data=[]]
recommended_install.append(msg) # depends on [control=['for'], data=['dep_pkg']] # depends on [control=['if'], data=[]]
if needs:
pkgs_to_install.extend(needs)
for dep_pkg in needs:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dep_pkg']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return (pkgs_to_install, optional_install, recommended_install)
|
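_check_all_deps relies on _resolve_deps to split a formula's dependencies into installable, unavailable, optional, and recommended sets before deciding what to queue for installation. The fragment below sketches only the first split (installable vs. unavailable) against a set of known packages; it is a simplified stand-in for illustration, not the actual _resolve_deps logic.

def split_dependencies(formula, available):
    # Partition declared dependencies into those we can install and those we cannot.
    needs, unavailable = [], []
    for dep in formula.get('dependencies', []):
        (needs if dep in available else unavailable).append(dep)
    return needs, unavailable

formula = {'name': 'apache', 'dependencies': ['php', 'mysql']}
print(split_dependencies(formula, available={'php'}))  # (['php'], ['mysql'])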
def update_module_item(self, id, course_id, module_id, module_item_completion_requirement_min_score=None, module_item_completion_requirement_type=None, module_item_external_url=None, module_item_indent=None, module_item_module_id=None, module_item_new_tab=None, module_item_position=None, module_item_published=None, module_item_title=None):
"""
Update a module item.
Update and return an existing module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - module_item[title]
"""The name of the module item"""
if module_item_title is not None:
data["module_item[title]"] = module_item_title
# OPTIONAL - module_item[position]
"""The position of this item in the module (1-based)"""
if module_item_position is not None:
data["module_item[position]"] = module_item_position
# OPTIONAL - module_item[indent]
"""0-based indent level; module items may be indented to show a hierarchy"""
if module_item_indent is not None:
data["module_item[indent]"] = module_item_indent
# OPTIONAL - module_item[external_url]
"""External url that the item points to. Only applies to 'ExternalUrl' type."""
if module_item_external_url is not None:
data["module_item[external_url]"] = module_item_external_url
# OPTIONAL - module_item[new_tab]
"""Whether the external tool opens in a new tab. Only applies to
'ExternalTool' type."""
if module_item_new_tab is not None:
data["module_item[new_tab]"] = module_item_new_tab
# OPTIONAL - module_item[completion_requirement][type]
"""Completion requirement for this module item.
"must_view": Applies to all item types
"must_contribute": Only applies to "Assignment", "Discussion", and "Page" types
"must_submit", "min_score": Only apply to "Assignment" and "Quiz" types
Inapplicable types will be ignored"""
if module_item_completion_requirement_type is not None:
self._validate_enum(module_item_completion_requirement_type, ["must_view", "must_contribute", "must_submit"])
data["module_item[completion_requirement][type]"] = module_item_completion_requirement_type
# OPTIONAL - module_item[completion_requirement][min_score]
"""Minimum score required to complete, Required for completion_requirement
type 'min_score'."""
if module_item_completion_requirement_min_score is not None:
data["module_item[completion_requirement][min_score]"] = module_item_completion_requirement_min_score
# OPTIONAL - module_item[published]
"""Whether the module item is published and visible to students."""
if module_item_published is not None:
data["module_item[published]"] = module_item_published
# OPTIONAL - module_item[module_id]
"""Move this item to another module by specifying the target module id here.
The target module must be in the same course."""
if module_item_module_id is not None:
data["module_item[module_id]"] = module_item_module_id
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}".format(**path), data=data, params=params, single_item=True)
|
def function[update_module_item, parameter[self, id, course_id, module_id, module_item_completion_requirement_min_score, module_item_completion_requirement_type, module_item_external_url, module_item_indent, module_item_module_id, module_item_new_tab, module_item_position, module_item_published, module_item_title]]:
constant[
Update a module item.
Update and return an existing module item
]
variable[path] assign[=] dictionary[[], []]
variable[data] assign[=] dictionary[[], []]
variable[params] assign[=] dictionary[[], []]
constant[ID]
call[name[path]][constant[course_id]] assign[=] name[course_id]
constant[ID]
call[name[path]][constant[module_id]] assign[=] name[module_id]
constant[ID]
call[name[path]][constant[id]] assign[=] name[id]
constant[The name of the module item]
if compare[name[module_item_title] is_not constant[None]] begin[:]
call[name[data]][constant[module_item[title]]] assign[=] name[module_item_title]
constant[The position of this item in the module (1-based)]
if compare[name[module_item_position] is_not constant[None]] begin[:]
call[name[data]][constant[module_item[position]]] assign[=] name[module_item_position]
constant[0-based indent level; module items may be indented to show a hierarchy]
if compare[name[module_item_indent] is_not constant[None]] begin[:]
call[name[data]][constant[module_item[indent]]] assign[=] name[module_item_indent]
constant[External url that the item points to. Only applies to 'ExternalUrl' type.]
if compare[name[module_item_external_url] is_not constant[None]] begin[:]
call[name[data]][constant[module_item[external_url]]] assign[=] name[module_item_external_url]
constant[Whether the external tool opens in a new tab. Only applies to
'ExternalTool' type.]
if compare[name[module_item_new_tab] is_not constant[None]] begin[:]
call[name[data]][constant[module_item[new_tab]]] assign[=] name[module_item_new_tab]
constant[Completion requirement for this module item.
"must_view": Applies to all item types
"must_contribute": Only applies to "Assignment", "Discussion", and "Page" types
"must_submit", "min_score": Only apply to "Assignment" and "Quiz" types
Inapplicable types will be ignored]
if compare[name[module_item_completion_requirement_type] is_not constant[None]] begin[:]
call[name[self]._validate_enum, parameter[name[module_item_completion_requirement_type], list[[<ast.Constant object at 0x7da1b0bf2350>, <ast.Constant object at 0x7da1b0bf08b0>, <ast.Constant object at 0x7da1b0bf28f0>]]]]
call[name[data]][constant[module_item[completion_requirement][type]]] assign[=] name[module_item_completion_requirement_type]
constant[Minimum score required to complete, Required for completion_requirement
type 'min_score'.]
if compare[name[module_item_completion_requirement_min_score] is_not constant[None]] begin[:]
call[name[data]][constant[module_item[completion_requirement][min_score]]] assign[=] name[module_item_completion_requirement_min_score]
constant[Whether the module item is published and visible to students.]
if compare[name[module_item_published] is_not constant[None]] begin[:]
call[name[data]][constant[module_item[published]]] assign[=] name[module_item_published]
constant[Move this item to another module by specifying the target module id here.
The target module must be in the same course.]
if compare[name[module_item_module_id] is_not constant[None]] begin[:]
call[name[data]][constant[module_item[module_id]]] assign[=] name[module_item_module_id]
call[name[self].logger.debug, parameter[call[constant[PUT /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}].format, parameter[]]]]
return[call[name[self].generic_request, parameter[constant[PUT], call[constant[/api/v1/courses/{course_id}/modules/{module_id}/items/{id}].format, parameter[]]]]]
|
keyword[def] identifier[update_module_item] ( identifier[self] , identifier[id] , identifier[course_id] , identifier[module_id] , identifier[module_item_completion_requirement_min_score] = keyword[None] , identifier[module_item_completion_requirement_type] = keyword[None] , identifier[module_item_external_url] = keyword[None] , identifier[module_item_indent] = keyword[None] , identifier[module_item_module_id] = keyword[None] , identifier[module_item_new_tab] = keyword[None] , identifier[module_item_position] = keyword[None] , identifier[module_item_published] = keyword[None] , identifier[module_item_title] = keyword[None] ):
literal[string]
identifier[path] ={}
identifier[data] ={}
identifier[params] ={}
literal[string]
identifier[path] [ literal[string] ]= identifier[course_id]
literal[string]
identifier[path] [ literal[string] ]= identifier[module_id]
literal[string]
identifier[path] [ literal[string] ]= identifier[id]
literal[string]
keyword[if] identifier[module_item_title] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[module_item_title]
literal[string]
keyword[if] identifier[module_item_position] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[module_item_position]
literal[string]
keyword[if] identifier[module_item_indent] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[module_item_indent]
literal[string]
keyword[if] identifier[module_item_external_url] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[module_item_external_url]
literal[string]
keyword[if] identifier[module_item_new_tab] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[module_item_new_tab]
literal[string]
keyword[if] identifier[module_item_completion_requirement_type] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_validate_enum] ( identifier[module_item_completion_requirement_type] ,[ literal[string] , literal[string] , literal[string] ])
identifier[data] [ literal[string] ]= identifier[module_item_completion_requirement_type]
literal[string]
keyword[if] identifier[module_item_completion_requirement_min_score] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[module_item_completion_requirement_min_score]
literal[string]
keyword[if] identifier[module_item_published] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[module_item_published]
literal[string]
keyword[if] identifier[module_item_module_id] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[module_item_module_id]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[params] = identifier[params] , identifier[data] = identifier[data] ,** identifier[path] ))
keyword[return] identifier[self] . identifier[generic_request] ( literal[string] , literal[string] . identifier[format] (** identifier[path] ), identifier[data] = identifier[data] , identifier[params] = identifier[params] , identifier[single_item] = keyword[True] )
|
def update_module_item(self, id, course_id, module_id, module_item_completion_requirement_min_score=None, module_item_completion_requirement_type=None, module_item_external_url=None, module_item_indent=None, module_item_module_id=None, module_item_new_tab=None, module_item_position=None, module_item_published=None, module_item_title=None):
"""
Update a module item.
Update and return an existing module item
"""
path = {}
data = {}
params = {} # REQUIRED - PATH - course_id
'ID'
path['course_id'] = course_id # REQUIRED - PATH - module_id
'ID'
path['module_id'] = module_id # REQUIRED - PATH - id
'ID'
path['id'] = id # OPTIONAL - module_item[title]
'The name of the module item'
if module_item_title is not None:
data['module_item[title]'] = module_item_title # depends on [control=['if'], data=['module_item_title']] # OPTIONAL - module_item[position]
'The position of this item in the module (1-based)'
if module_item_position is not None:
data['module_item[position]'] = module_item_position # depends on [control=['if'], data=['module_item_position']] # OPTIONAL - module_item[indent]
'0-based indent level; module items may be indented to show a hierarchy'
if module_item_indent is not None:
data['module_item[indent]'] = module_item_indent # depends on [control=['if'], data=['module_item_indent']] # OPTIONAL - module_item[external_url]
"External url that the item points to. Only applies to 'ExternalUrl' type."
if module_item_external_url is not None:
data['module_item[external_url]'] = module_item_external_url # depends on [control=['if'], data=['module_item_external_url']] # OPTIONAL - module_item[new_tab]
"Whether the external tool opens in a new tab. Only applies to\n 'ExternalTool' type."
if module_item_new_tab is not None:
data['module_item[new_tab]'] = module_item_new_tab # depends on [control=['if'], data=['module_item_new_tab']] # OPTIONAL - module_item[completion_requirement][type]
'Completion requirement for this module item.\n "must_view": Applies to all item types\n "must_contribute": Only applies to "Assignment", "Discussion", and "Page" types\n "must_submit", "min_score": Only apply to "Assignment" and "Quiz" types\n Inapplicable types will be ignored'
if module_item_completion_requirement_type is not None:
self._validate_enum(module_item_completion_requirement_type, ['must_view', 'must_contribute', 'must_submit'])
data['module_item[completion_requirement][type]'] = module_item_completion_requirement_type # depends on [control=['if'], data=['module_item_completion_requirement_type']] # OPTIONAL - module_item[completion_requirement][min_score]
"Minimum score required to complete, Required for completion_requirement\n type 'min_score'."
if module_item_completion_requirement_min_score is not None:
data['module_item[completion_requirement][min_score]'] = module_item_completion_requirement_min_score # depends on [control=['if'], data=['module_item_completion_requirement_min_score']] # OPTIONAL - module_item[published]
'Whether the module item is published and visible to students.'
if module_item_published is not None:
data['module_item[published]'] = module_item_published # depends on [control=['if'], data=['module_item_published']] # OPTIONAL - module_item[module_id]
'Move this item to another module by specifying the target module id here.\n The target module must be in the same course.'
if module_item_module_id is not None:
data['module_item[module_id]'] = module_item_module_id # depends on [control=['if'], data=['module_item_module_id']]
self.logger.debug('PUT /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}'.format(params=params, data=data, **path))
return self.generic_request('PUT', '/api/v1/courses/{course_id}/modules/{module_id}/items/{id}'.format(**path), data=data, params=params, single_item=True)
|
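update_module_item builds Canvas-style nested form keys of the shape module_item[field], silently skipping parameters that were left as None. The small helper below captures that key-building pattern in isolation; it is a hypothetical convenience function, not part of the real client shown above.

def build_form_data(prefix, **fields):
    # Rails/Canvas-style nested form keys: prefix[field] = value, skipping Nones.
    return {'{0}[{1}]'.format(prefix, k): v for k, v in fields.items() if v is not None}

print(build_form_data('module_item', title='Week 1', position=2, indent=None))
# {'module_item[title]': 'Week 1', 'module_item[position]': 2}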
def fw_rule_delete(self, data, fw_name=None):
"""Top level rule delete function. """
LOG.debug("FW Rule delete %s", data)
self._fw_rule_delete(fw_name, data)
|
def function[fw_rule_delete, parameter[self, data, fw_name]]:
constant[Top level rule delete function. ]
call[name[LOG].debug, parameter[constant[FW Rule delete %s], name[data]]]
call[name[self]._fw_rule_delete, parameter[name[fw_name], name[data]]]
|
keyword[def] identifier[fw_rule_delete] ( identifier[self] , identifier[data] , identifier[fw_name] = keyword[None] ):
literal[string]
identifier[LOG] . identifier[debug] ( literal[string] , identifier[data] )
identifier[self] . identifier[_fw_rule_delete] ( identifier[fw_name] , identifier[data] )
|
def fw_rule_delete(self, data, fw_name=None):
"""Top level rule delete function. """
LOG.debug('FW Rule delete %s', data)
self._fw_rule_delete(fw_name, data)
|
def apply_relationships(self, data, obj):
"""Apply relationship provided by data to obj
:param dict data: data provided by the client
:param DeclarativeMeta obj: the sqlalchemy object to plug relationships to
:return boolean: True if relationship have changed else False
"""
relationships_to_apply = []
relationship_fields = get_relationships(self.resource.schema, model_field=True)
for key, value in data.items():
if key in relationship_fields:
related_model = getattr(obj.__class__, key).property.mapper.class_
schema_field = get_schema_field(self.resource.schema, key)
related_id_field = self.resource.schema._declared_fields[schema_field].id_field
if isinstance(value, list):
related_objects = []
for identifier in value:
related_object = self.get_related_object(related_model, related_id_field, {'id': identifier})
related_objects.append(related_object)
relationships_to_apply.append({'field': key, 'value': related_objects})
else:
related_object = None
if value is not None:
related_object = self.get_related_object(related_model, related_id_field, {'id': value})
relationships_to_apply.append({'field': key, 'value': related_object})
for relationship in relationships_to_apply:
setattr(obj, relationship['field'], relationship['value'])
|
def function[apply_relationships, parameter[self, data, obj]]:
constant[Apply relationship provided by data to obj
:param dict data: data provided by the client
:param DeclarativeMeta obj: the sqlalchemy object to plug relationships to
:return boolean: True if relationship have changed else False
]
variable[relationships_to_apply] assign[=] list[[]]
variable[relationship_fields] assign[=] call[name[get_relationships], parameter[name[self].resource.schema]]
for taget[tuple[[<ast.Name object at 0x7da1b1643c10>, <ast.Name object at 0x7da1b1642ce0>]]] in starred[call[name[data].items, parameter[]]] begin[:]
if compare[name[key] in name[relationship_fields]] begin[:]
variable[related_model] assign[=] call[name[getattr], parameter[name[obj].__class__, name[key]]].property.mapper.class_
variable[schema_field] assign[=] call[name[get_schema_field], parameter[name[self].resource.schema, name[key]]]
variable[related_id_field] assign[=] call[name[self].resource.schema._declared_fields][name[schema_field]].id_field
if call[name[isinstance], parameter[name[value], name[list]]] begin[:]
variable[related_objects] assign[=] list[[]]
for taget[name[identifier]] in starred[name[value]] begin[:]
variable[related_object] assign[=] call[name[self].get_related_object, parameter[name[related_model], name[related_id_field], dictionary[[<ast.Constant object at 0x7da1b1643f70>], [<ast.Name object at 0x7da1b1641b70>]]]]
call[name[related_objects].append, parameter[name[related_object]]]
call[name[relationships_to_apply].append, parameter[dictionary[[<ast.Constant object at 0x7da1b16429e0>, <ast.Constant object at 0x7da1b1640d90>], [<ast.Name object at 0x7da1b16430a0>, <ast.Name object at 0x7da1b1640820>]]]]
for taget[name[relationship]] in starred[name[relationships_to_apply]] begin[:]
call[name[setattr], parameter[name[obj], call[name[relationship]][constant[field]], call[name[relationship]][constant[value]]]]
|
keyword[def] identifier[apply_relationships] ( identifier[self] , identifier[data] , identifier[obj] ):
literal[string]
identifier[relationships_to_apply] =[]
identifier[relationship_fields] = identifier[get_relationships] ( identifier[self] . identifier[resource] . identifier[schema] , identifier[model_field] = keyword[True] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[data] . identifier[items] ():
keyword[if] identifier[key] keyword[in] identifier[relationship_fields] :
identifier[related_model] = identifier[getattr] ( identifier[obj] . identifier[__class__] , identifier[key] ). identifier[property] . identifier[mapper] . identifier[class_]
identifier[schema_field] = identifier[get_schema_field] ( identifier[self] . identifier[resource] . identifier[schema] , identifier[key] )
identifier[related_id_field] = identifier[self] . identifier[resource] . identifier[schema] . identifier[_declared_fields] [ identifier[schema_field] ]. identifier[id_field]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[list] ):
identifier[related_objects] =[]
keyword[for] identifier[identifier] keyword[in] identifier[value] :
identifier[related_object] = identifier[self] . identifier[get_related_object] ( identifier[related_model] , identifier[related_id_field] ,{ literal[string] : identifier[identifier] })
identifier[related_objects] . identifier[append] ( identifier[related_object] )
identifier[relationships_to_apply] . identifier[append] ({ literal[string] : identifier[key] , literal[string] : identifier[related_objects] })
keyword[else] :
identifier[related_object] = keyword[None]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[related_object] = identifier[self] . identifier[get_related_object] ( identifier[related_model] , identifier[related_id_field] ,{ literal[string] : identifier[value] })
identifier[relationships_to_apply] . identifier[append] ({ literal[string] : identifier[key] , literal[string] : identifier[related_object] })
keyword[for] identifier[relationship] keyword[in] identifier[relationships_to_apply] :
identifier[setattr] ( identifier[obj] , identifier[relationship] [ literal[string] ], identifier[relationship] [ literal[string] ])
|
def apply_relationships(self, data, obj):
"""Apply relationship provided by data to obj
:param dict data: data provided by the client
:param DeclarativeMeta obj: the sqlalchemy object to plug relationships to
        :return boolean: True if relationships have changed else False
"""
relationships_to_apply = []
relationship_fields = get_relationships(self.resource.schema, model_field=True)
for (key, value) in data.items():
if key in relationship_fields:
related_model = getattr(obj.__class__, key).property.mapper.class_
schema_field = get_schema_field(self.resource.schema, key)
related_id_field = self.resource.schema._declared_fields[schema_field].id_field
if isinstance(value, list):
related_objects = []
for identifier in value:
related_object = self.get_related_object(related_model, related_id_field, {'id': identifier})
related_objects.append(related_object) # depends on [control=['for'], data=['identifier']]
relationships_to_apply.append({'field': key, 'value': related_objects}) # depends on [control=['if'], data=[]]
else:
related_object = None
if value is not None:
related_object = self.get_related_object(related_model, related_id_field, {'id': value}) # depends on [control=['if'], data=['value']]
relationships_to_apply.append({'field': key, 'value': related_object}) # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=[]]
for relationship in relationships_to_apply:
setattr(obj, relationship['field'], relationship['value']) # depends on [control=['for'], data=['relationship']]
|
def long_press(self, locator, duration=1000):
""" Long press the element with optional duration """
driver = self._current_application()
element = self._element_find(locator, True, True)
action = TouchAction(driver)
action.press(element).wait(duration).release().perform()
|
def function[long_press, parameter[self, locator, duration]]:
constant[ Long press the element with optional duration ]
variable[driver] assign[=] call[name[self]._current_application, parameter[]]
variable[element] assign[=] call[name[self]._element_find, parameter[name[locator], constant[True], constant[True]]]
variable[action] assign[=] call[name[TouchAction], parameter[name[driver]]]
call[call[call[call[name[action].press, parameter[name[element]]].wait, parameter[name[duration]]].release, parameter[]].perform, parameter[]]
|
keyword[def] identifier[long_press] ( identifier[self] , identifier[locator] , identifier[duration] = literal[int] ):
literal[string]
identifier[driver] = identifier[self] . identifier[_current_application] ()
identifier[element] = identifier[self] . identifier[_element_find] ( identifier[locator] , keyword[True] , keyword[True] )
identifier[action] = identifier[TouchAction] ( identifier[driver] )
identifier[action] . identifier[press] ( identifier[element] ). identifier[wait] ( identifier[duration] ). identifier[release] (). identifier[perform] ()
|
def long_press(self, locator, duration=1000):
""" Long press the element with optional duration """
driver = self._current_application()
element = self._element_find(locator, True, True)
action = TouchAction(driver)
action.press(element).wait(duration).release().perform()
|
def find_library_linux(cls):
"""Loads the SEGGER DLL from the root directory.
On Linux, the SEGGER tools are installed under the ``/opt/SEGGER``
directory with versioned directories having the suffix ``_VERSION``.
Args:
cls (Library): the ``Library`` class
Returns:
The paths to the J-Link library files in the order that they are
found.
"""
dll = Library.JLINK_SDK_NAME
root = os.path.join('/', 'opt', 'SEGGER')
for (directory_name, subdirs, files) in os.walk(root):
fnames = []
x86_found = False
for f in files:
path = os.path.join(directory_name, f)
if os.path.isfile(path) and f.startswith(dll):
fnames.append(f)
if '_x86' in path:
x86_found = True
for fname in fnames:
fpath = os.path.join(directory_name, fname)
if util.is_os_64bit():
if '_x86' not in fname:
yield fpath
elif x86_found:
if '_x86' in fname:
yield fpath
else:
yield fpath
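# Hedged usage sketch, assuming find_library_linux is exposed as a classmethod
# on the Library class above (as in pylink-style bindings): take the first
# matching library path, if any.
for libpath in Library.find_library_linux():
    print('Using J-Link library at', libpath)
    break
else:
    print('No SEGGER J-Link library found under /opt/SEGGER')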
|
def function[find_library_linux, parameter[cls]]:
constant[Loads the SEGGER DLL from the root directory.
On Linux, the SEGGER tools are installed under the ``/opt/SEGGER``
directory with versioned directories having the suffix ``_VERSION``.
Args:
cls (Library): the ``Library`` class
Returns:
The paths to the J-Link library files in the order that they are
found.
]
variable[dll] assign[=] name[Library].JLINK_SDK_NAME
variable[root] assign[=] call[name[os].path.join, parameter[constant[/], constant[opt], constant[SEGGER]]]
for taget[tuple[[<ast.Name object at 0x7da1b1721a20>, <ast.Name object at 0x7da1b1721ba0>, <ast.Name object at 0x7da1b1720c40>]]] in starred[call[name[os].walk, parameter[name[root]]]] begin[:]
variable[fnames] assign[=] list[[]]
variable[x86_found] assign[=] constant[False]
for taget[name[f]] in starred[name[files]] begin[:]
variable[path] assign[=] call[name[os].path.join, parameter[name[directory_name], name[f]]]
if <ast.BoolOp object at 0x7da1b1720310> begin[:]
call[name[fnames].append, parameter[name[f]]]
if compare[constant[_x86] in name[path]] begin[:]
variable[x86_found] assign[=] constant[True]
for taget[name[fname]] in starred[name[fnames]] begin[:]
variable[fpath] assign[=] call[name[os].path.join, parameter[name[directory_name], name[fname]]]
if call[name[util].is_os_64bit, parameter[]] begin[:]
if compare[constant[_x86] <ast.NotIn object at 0x7da2590d7190> name[fname]] begin[:]
<ast.Yield object at 0x7da1b17df280>
|
keyword[def] identifier[find_library_linux] ( identifier[cls] ):
literal[string]
identifier[dll] = identifier[Library] . identifier[JLINK_SDK_NAME]
identifier[root] = identifier[os] . identifier[path] . identifier[join] ( literal[string] , literal[string] , literal[string] )
keyword[for] ( identifier[directory_name] , identifier[subdirs] , identifier[files] ) keyword[in] identifier[os] . identifier[walk] ( identifier[root] ):
identifier[fnames] =[]
identifier[x86_found] = keyword[False]
keyword[for] identifier[f] keyword[in] identifier[files] :
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory_name] , identifier[f] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[path] ) keyword[and] identifier[f] . identifier[startswith] ( identifier[dll] ):
identifier[fnames] . identifier[append] ( identifier[f] )
keyword[if] literal[string] keyword[in] identifier[path] :
identifier[x86_found] = keyword[True]
keyword[for] identifier[fname] keyword[in] identifier[fnames] :
identifier[fpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory_name] , identifier[fname] )
keyword[if] identifier[util] . identifier[is_os_64bit] ():
keyword[if] literal[string] keyword[not] keyword[in] identifier[fname] :
keyword[yield] identifier[fpath]
keyword[elif] identifier[x86_found] :
keyword[if] literal[string] keyword[in] identifier[fname] :
keyword[yield] identifier[fpath]
keyword[else] :
keyword[yield] identifier[fpath]
|
def find_library_linux(cls):
"""Loads the SEGGER DLL from the root directory.
On Linux, the SEGGER tools are installed under the ``/opt/SEGGER``
directory with versioned directories having the suffix ``_VERSION``.
Args:
cls (Library): the ``Library`` class
Returns:
The paths to the J-Link library files in the order that they are
found.
"""
dll = Library.JLINK_SDK_NAME
root = os.path.join('/', 'opt', 'SEGGER')
for (directory_name, subdirs, files) in os.walk(root):
fnames = []
x86_found = False
for f in files:
path = os.path.join(directory_name, f)
if os.path.isfile(path) and f.startswith(dll):
fnames.append(f)
if '_x86' in path:
x86_found = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
for fname in fnames:
fpath = os.path.join(directory_name, fname)
if util.is_os_64bit():
if '_x86' not in fname:
yield fpath # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif x86_found:
if '_x86' in fname:
yield fpath # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
yield fpath # depends on [control=['for'], data=['fname']] # depends on [control=['for'], data=[]]
|
def set_locked_variable(self, key, access_key, value):
"""Set an already locked global variable
:param key: the key of the global variable to be set
:param access_key: the access key to the already locked global variable
:param value: the new value of the global variable
"""
return self.set_variable(key, value, per_reference=False, access_key=access_key)
|
def function[set_locked_variable, parameter[self, key, access_key, value]]:
constant[Set an already locked global variable
:param key: the key of the global variable to be set
:param access_key: the access key to the already locked global variable
:param value: the new value of the global variable
]
return[call[name[self].set_variable, parameter[name[key], name[value]]]]
|
keyword[def] identifier[set_locked_variable] ( identifier[self] , identifier[key] , identifier[access_key] , identifier[value] ):
literal[string]
keyword[return] identifier[self] . identifier[set_variable] ( identifier[key] , identifier[value] , identifier[per_reference] = keyword[False] , identifier[access_key] = identifier[access_key] )
|
def set_locked_variable(self, key, access_key, value):
"""Set an already locked global variable
:param key: the key of the global variable to be set
:param access_key: the access key to the already locked global variable
:param value: the new value of the global variable
"""
return self.set_variable(key, value, per_reference=False, access_key=access_key)
|
def zoom_in(self, increment=1):
"""
Zooms in the editor (makes the font bigger).
:param increment: zoom level increment. Default is 1.
"""
self.zoom_level += increment
TextHelper(self).mark_whole_doc_dirty()
self._reset_stylesheet()
|
def function[zoom_in, parameter[self, increment]]:
constant[
Zooms in the editor (makes the font bigger).
:param increment: zoom level increment. Default is 1.
]
<ast.AugAssign object at 0x7da18f720a90>
call[call[name[TextHelper], parameter[name[self]]].mark_whole_doc_dirty, parameter[]]
call[name[self]._reset_stylesheet, parameter[]]
|
keyword[def] identifier[zoom_in] ( identifier[self] , identifier[increment] = literal[int] ):
literal[string]
identifier[self] . identifier[zoom_level] += identifier[increment]
identifier[TextHelper] ( identifier[self] ). identifier[mark_whole_doc_dirty] ()
identifier[self] . identifier[_reset_stylesheet] ()
|
def zoom_in(self, increment=1):
"""
Zooms in the editor (makes the font bigger).
:param increment: zoom level increment. Default is 1.
"""
self.zoom_level += increment
TextHelper(self).mark_whole_doc_dirty()
self._reset_stylesheet()
|
def _reduction_output_shape(x, output_shape, reduced_dim):
"""Helper function to reduce_sum, etc."""
if output_shape is None:
if reduced_dim is None:
return Shape([])
else:
if reduced_dim not in x.shape.dims:
raise ValueError(
"reduced_dim=%s not in x.shape.dims=%s" % (reduced_dim, x.shape))
return x.shape - reduced_dim
if reduced_dim is not None:
if [reduced_dim] != [d for d in x.shape.dims if d not in output_shape.dims]:
raise ValueError(
"reduced_dim contradicts output_shape:"
"x=%s output_shape=%s reduced_dim=%s" %
(x, output_shape, reduced_dim))
return output_shape
|
def function[_reduction_output_shape, parameter[x, output_shape, reduced_dim]]:
constant[Helper function to reduce_sum, etc.]
if compare[name[output_shape] is constant[None]] begin[:]
if compare[name[reduced_dim] is constant[None]] begin[:]
return[call[name[Shape], parameter[list[[]]]]]
if compare[name[reduced_dim] is_not constant[None]] begin[:]
if compare[list[[<ast.Name object at 0x7da20e9b0550>]] not_equal[!=] <ast.ListComp object at 0x7da20e9b1360>] begin[:]
<ast.Raise object at 0x7da20e9b0bb0>
return[name[output_shape]]
|
keyword[def] identifier[_reduction_output_shape] ( identifier[x] , identifier[output_shape] , identifier[reduced_dim] ):
literal[string]
keyword[if] identifier[output_shape] keyword[is] keyword[None] :
keyword[if] identifier[reduced_dim] keyword[is] keyword[None] :
keyword[return] identifier[Shape] ([])
keyword[else] :
keyword[if] identifier[reduced_dim] keyword[not] keyword[in] identifier[x] . identifier[shape] . identifier[dims] :
keyword[raise] identifier[ValueError] (
literal[string] %( identifier[reduced_dim] , identifier[x] . identifier[shape] ))
keyword[return] identifier[x] . identifier[shape] - identifier[reduced_dim]
keyword[if] identifier[reduced_dim] keyword[is] keyword[not] keyword[None] :
keyword[if] [ identifier[reduced_dim] ]!=[ identifier[d] keyword[for] identifier[d] keyword[in] identifier[x] . identifier[shape] . identifier[dims] keyword[if] identifier[d] keyword[not] keyword[in] identifier[output_shape] . identifier[dims] ]:
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] %
( identifier[x] , identifier[output_shape] , identifier[reduced_dim] ))
keyword[return] identifier[output_shape]
|
def _reduction_output_shape(x, output_shape, reduced_dim):
"""Helper function to reduce_sum, etc."""
if output_shape is None:
if reduced_dim is None:
return Shape([]) # depends on [control=['if'], data=[]]
else:
if reduced_dim not in x.shape.dims:
raise ValueError('reduced_dim=%s not in x.shape.dims=%s' % (reduced_dim, x.shape)) # depends on [control=['if'], data=['reduced_dim']]
return x.shape - reduced_dim # depends on [control=['if'], data=[]]
if reduced_dim is not None:
if [reduced_dim] != [d for d in x.shape.dims if d not in output_shape.dims]:
raise ValueError('reduced_dim contradicts output_shape:x=%s output_shape=%s reduced_dim=%s' % (x, output_shape, reduced_dim)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['reduced_dim']]
return output_shape
|
def wordify(self):
"""
Constructs string of all documents.
:return: document representation of the dataset, one line per document
:rtype: str
"""
string_documents = []
for klass, document in zip(self.resulting_classes, self.resulting_documents):
string_documents.append("!" + str(klass) + " " + '' .join(document))
return '\n'.join(string_documents)
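# Hedged, self-contained illustration of the output format produced above
# (hypothetical stand-in data): one line per document, prefixed with '!' and
# the class label.
resulting_classes = [1, 0]
resulting_documents = [['good', ' ', 'movie'], ['bad']]
lines = ['!' + str(klass) + ' ' + ''.join(doc)
         for klass, doc in zip(resulting_classes, resulting_documents)]
print('\n'.join(lines))
# !1 good movie
# !0 bad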
|
def function[wordify, parameter[self]]:
constant[
Constructs string of all documents.
:return: document representation of the dataset, one line per document
:rtype: str
]
variable[string_documents] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da204622350>, <ast.Name object at 0x7da204623e50>]]] in starred[call[name[zip], parameter[name[self].resulting_classes, name[self].resulting_documents]]] begin[:]
call[name[string_documents].append, parameter[binary_operation[binary_operation[binary_operation[constant[!] + call[name[str], parameter[name[klass]]]] + constant[ ]] + call[constant[].join, parameter[name[document]]]]]]
return[call[constant[
].join, parameter[name[string_documents]]]]
|
keyword[def] identifier[wordify] ( identifier[self] ):
literal[string]
identifier[string_documents] =[]
keyword[for] identifier[klass] , identifier[document] keyword[in] identifier[zip] ( identifier[self] . identifier[resulting_classes] , identifier[self] . identifier[resulting_documents] ):
identifier[string_documents] . identifier[append] ( literal[string] + identifier[str] ( identifier[klass] )+ literal[string] + literal[string] . identifier[join] ( identifier[document] ))
keyword[return] literal[string] . identifier[join] ( identifier[string_documents] )
|
def wordify(self):
"""
Constructs string of all documents.
:return: document representation of the dataset, one line per document
:rtype: str
"""
string_documents = []
for (klass, document) in zip(self.resulting_classes, self.resulting_documents):
string_documents.append('!' + str(klass) + ' ' + ''.join(document)) # depends on [control=['for'], data=[]]
return '\n'.join(string_documents)
|
def get_package_format_names(predicate=None):
"""Get names for available package formats."""
return [
k
for k, v in six.iteritems(get_package_formats())
if not predicate or predicate(k, v)
]
|
def function[get_package_format_names, parameter[predicate]]:
constant[Get names for available package formats.]
return[<ast.ListComp object at 0x7da1b19c10f0>]
|
keyword[def] identifier[get_package_format_names] ( identifier[predicate] = keyword[None] ):
literal[string]
keyword[return] [
identifier[k]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[get_package_formats] ())
keyword[if] keyword[not] identifier[predicate] keyword[or] identifier[predicate] ( identifier[k] , identifier[v] )
]
|
def get_package_format_names(predicate=None):
"""Get names for available package formats."""
return [k for (k, v) in six.iteritems(get_package_formats()) if not predicate or predicate(k, v)]
|
def is_gvcf_file(in_file):
"""Check if an input file is raw gVCF
"""
to_check = 100
n = 0
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if not line.startswith("##"):
if n > to_check:
break
n += 1
parts = line.split("\t")
# GATK
if parts[4] == "<NON_REF>":
return True
# strelka2
if parts[4] == "." and parts[7].startswith("BLOCKAVG"):
return True
# freebayes
if parts[4] == "<*>":
return True
                # platypus
if parts[4] == "N" and parts[6] == "REFCALL":
return True
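# Hedged illustration of the record shapes each heuristic above matches; these
# are hypothetical data lines, not real caller output. Column 4 is ALT,
# column 6 is FILTER, column 7 is INFO.
sample_lines = {
    'gatk': 'chr1\t100\t.\tA\t<NON_REF>\t.\t.\tEND=200\tGT:DP\t0/0:30',
    'strelka2': 'chr1\t100\t.\tA\t.\t.\tPASS\tBLOCKAVG_min30p3a\tGT:DP\t0/0:30',
    'freebayes': 'chr1\t100\t.\tA\t<*>\t0\t.\t.\tGT:DP\t0/0:30',
    'platypus': 'chr1\t100\t.\tA\tN\t0\tREFCALL\t.\tGT:DP\t0/0:30',
}
for caller, line in sample_lines.items():
    parts = line.split('\t')
    print(caller, parts[4], parts[6], parts[7])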
|
def function[is_gvcf_file, parameter[in_file]]:
constant[Check if an input file is raw gVCF
]
variable[to_check] assign[=] constant[100]
variable[n] assign[=] constant[0]
with call[name[utils].open_gzipsafe, parameter[name[in_file]]] begin[:]
for taget[name[line]] in starred[name[in_handle]] begin[:]
if <ast.UnaryOp object at 0x7da1b1897d30> begin[:]
if compare[name[n] greater[>] name[to_check]] begin[:]
break
<ast.AugAssign object at 0x7da1b18971c0>
variable[parts] assign[=] call[name[line].split, parameter[constant[ ]]]
if compare[call[name[parts]][constant[4]] equal[==] constant[<NON_REF>]] begin[:]
return[constant[True]]
if <ast.BoolOp object at 0x7da1b1833fd0> begin[:]
return[constant[True]]
if compare[call[name[parts]][constant[4]] equal[==] constant[<*>]] begin[:]
return[constant[True]]
if <ast.BoolOp object at 0x7da1b1830be0> begin[:]
return[constant[True]]
|
keyword[def] identifier[is_gvcf_file] ( identifier[in_file] ):
literal[string]
identifier[to_check] = literal[int]
identifier[n] = literal[int]
keyword[with] identifier[utils] . identifier[open_gzipsafe] ( identifier[in_file] ) keyword[as] identifier[in_handle] :
keyword[for] identifier[line] keyword[in] identifier[in_handle] :
keyword[if] keyword[not] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[if] identifier[n] > identifier[to_check] :
keyword[break]
identifier[n] += literal[int]
identifier[parts] = identifier[line] . identifier[split] ( literal[string] )
keyword[if] identifier[parts] [ literal[int] ]== literal[string] :
keyword[return] keyword[True]
keyword[if] identifier[parts] [ literal[int] ]== literal[string] keyword[and] identifier[parts] [ literal[int] ]. identifier[startswith] ( literal[string] ):
keyword[return] keyword[True]
keyword[if] identifier[parts] [ literal[int] ]== literal[string] :
keyword[return] keyword[True]
keyword[if] identifier[parts] [ literal[int] ]== literal[string] keyword[and] identifier[parts] [ literal[int] ]== literal[string] :
keyword[return] keyword[True]
|
def is_gvcf_file(in_file):
"""Check if an input file is raw gVCF
"""
to_check = 100
n = 0
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if not line.startswith('##'):
if n > to_check:
break # depends on [control=['if'], data=[]]
n += 1
parts = line.split('\t')
# GATK
if parts[4] == '<NON_REF>':
return True # depends on [control=['if'], data=[]]
# strelka2
if parts[4] == '.' and parts[7].startswith('BLOCKAVG'):
return True # depends on [control=['if'], data=[]]
# freebayes
if parts[4] == '<*>':
return True # depends on [control=['if'], data=[]]
                # platypus
if parts[4] == 'N' and parts[6] == 'REFCALL':
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['in_handle']]
|
def add_based_on_time(files_with_tags, on_location):
'''
Sometimes the first photo in a series does not have gps coordinates because the phone
    doesn't have a GPS fix yet. To add these photos as well, we take the list of photos which were
    taken in the right location. Then we add any photos taken within 10 minutes of these photos,
because they are almost certainly taken in the same area.
'''
to_add = dict()
for veiling_f, veiling_tags in on_location.items():
for compare_f, compare_tags in files_with_tags.items():
delta = abs(veiling_tags['TIME'] - compare_tags['TIME'])
if (delta.total_seconds() < 10 * 60) and (compare_f not in on_location.keys()):
to_add[compare_f] = compare_tags
return to_add
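# Hedged, self-contained example of the 10-minute window (assumes the function
# above is in scope; tag dicts are hypothetical and only the 'TIME' key matters).
from datetime import datetime

files_with_tags = {
    'IMG_001.jpg': {'TIME': datetime(2020, 5, 1, 12, 0)},   # no GPS fix yet
    'IMG_002.jpg': {'TIME': datetime(2020, 5, 1, 12, 4)},   # has GPS, on location
    'IMG_099.jpg': {'TIME': datetime(2020, 5, 1, 18, 0)},   # taken hours later
}
on_location = {'IMG_002.jpg': files_with_tags['IMG_002.jpg']}
print(sorted(add_based_on_time(files_with_tags, on_location)))
# ['IMG_001.jpg'] -- within 10 minutes of a photo that is already on location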
|
def function[add_based_on_time, parameter[files_with_tags, on_location]]:
constant[
Sometimes the first photo in a series does not have gps coordinates because the phone
doesnt have a gps-fix yet. To add these photos as well we take the list of photos wich where
taken in the right location. Then add any photos taken whitin 10 minutes of these photos,
because they are almost certainly taken in the same area.
]
variable[to_add] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da204564280>, <ast.Name object at 0x7da204566d40>]]] in starred[call[name[on_location].items, parameter[]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da204566110>, <ast.Name object at 0x7da204564790>]]] in starred[call[name[files_with_tags].items, parameter[]]] begin[:]
variable[delta] assign[=] call[name[abs], parameter[binary_operation[call[name[veiling_tags]][constant[TIME]] - call[name[compare_tags]][constant[TIME]]]]]
if <ast.BoolOp object at 0x7da204566410> begin[:]
call[name[to_add]][name[compare_f]] assign[=] name[compare_tags]
return[name[to_add]]
|
keyword[def] identifier[add_based_on_time] ( identifier[files_with_tags] , identifier[on_location] ):
literal[string]
identifier[to_add] = identifier[dict] ()
keyword[for] identifier[veiling_f] , identifier[veiling_tags] keyword[in] identifier[on_location] . identifier[items] ():
keyword[for] identifier[compare_f] , identifier[compare_tags] keyword[in] identifier[files_with_tags] . identifier[items] ():
identifier[delta] = identifier[abs] ( identifier[veiling_tags] [ literal[string] ]- identifier[compare_tags] [ literal[string] ])
keyword[if] ( identifier[delta] . identifier[total_seconds] ()< literal[int] * literal[int] ) keyword[and] ( identifier[compare_f] keyword[not] keyword[in] identifier[on_location] . identifier[keys] ()):
identifier[to_add] [ identifier[compare_f] ]= identifier[compare_tags]
keyword[return] identifier[to_add]
|
def add_based_on_time(files_with_tags, on_location):
"""
Sometimes the first photo in a series does not have gps coordinates because the phone
    doesn't have a GPS fix yet. To add these photos as well, we take the list of photos which were
    taken in the right location. Then we add any photos taken within 10 minutes of these photos,
because they are almost certainly taken in the same area.
"""
to_add = dict()
for (veiling_f, veiling_tags) in on_location.items():
for (compare_f, compare_tags) in files_with_tags.items():
delta = abs(veiling_tags['TIME'] - compare_tags['TIME'])
if delta.total_seconds() < 10 * 60 and compare_f not in on_location.keys():
to_add[compare_f] = compare_tags # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return to_add
|
def format_fat(self, quick):
"""Formats the medium as FAT. Generally only useful for floppy images as
no partition table will be created.
in quick of type bool
Quick format it when set.
"""
if not isinstance(quick, bool):
raise TypeError("quick can only be an instance of type bool")
self._call("formatFAT",
in_p=[quick])
|
def function[format_fat, parameter[self, quick]]:
constant[Formats the medium as FAT. Generally only useful for floppy images as
no partition table will be created.
in quick of type bool
Quick format it when set.
]
if <ast.UnaryOp object at 0x7da204346f80> begin[:]
<ast.Raise object at 0x7da204344370>
call[name[self]._call, parameter[constant[formatFAT]]]
|
keyword[def] identifier[format_fat] ( identifier[self] , identifier[quick] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[quick] , identifier[bool] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[self] . identifier[_call] ( literal[string] ,
identifier[in_p] =[ identifier[quick] ])
|
def format_fat(self, quick):
"""Formats the medium as FAT. Generally only useful for floppy images as
no partition table will be created.
in quick of type bool
Quick format it when set.
"""
if not isinstance(quick, bool):
raise TypeError('quick can only be an instance of type bool') # depends on [control=['if'], data=[]]
self._call('formatFAT', in_p=[quick])
|
def changes(new_cmp_dict, old_cmp_dict, id_column, columns):
"""Return a list dict of the changes of the
rows that exist in both dictionaries
User must provide an ID column for old_cmp_dict
"""
update_ldict = []
same_keys = set(new_cmp_dict).intersection(set(old_cmp_dict))
for same_key in same_keys:
# Get the Union of the set of keys
# for both dictionaries to account
# for missing keys
old_dict = old_cmp_dict[same_key]
new_dict = new_cmp_dict[same_key]
dict_keys = set(old_dict).intersection(set(new_dict))
update_dict = {}
for dict_key in columns:
old_val = old_dict.get(dict_key, 'NaN')
new_val = new_dict.get(dict_key, 'NaN')
if old_val != new_val and new_val != 'NaN':
                if id_column != None:
try:
update_dict[id_column] = old_dict[id_column]
except KeyError:
print("Input Dictionary 'old_cmp_dict' must have ID column")
update_dict[dict_key] = new_val
if update_dict:
update_ldict.append(update_dict)
return update_ldict
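# Hedged, self-contained example (assumes the changes() function above is in
# scope). Keys are row ids, values are row dicts; only 'price' of row 2 differs.
old = {1: {'id': 1, 'name': 'ada', 'price': 10},
       2: {'id': 2, 'name': 'bob', 'price': 20}}
new = {1: {'id': 1, 'name': 'ada', 'price': 10},
       2: {'id': 2, 'name': 'bob', 'price': 25},
       3: {'id': 3, 'name': 'cyd', 'price': 30}}   # row 3 is ignored: not in old
print(changes(new, old, id_column='id', columns=['name', 'price']))
# [{'id': 2, 'price': 25}]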
|
def function[changes, parameter[new_cmp_dict, old_cmp_dict, id_column, columns]]:
constant[Return a list dict of the changes of the
rows that exist in both dictionaries
User must provide an ID column for old_cmp_dict
]
variable[update_ldict] assign[=] list[[]]
variable[same_keys] assign[=] call[call[name[set], parameter[name[new_cmp_dict]]].intersection, parameter[call[name[set], parameter[name[old_cmp_dict]]]]]
for taget[name[same_key]] in starred[name[same_keys]] begin[:]
variable[old_dict] assign[=] call[name[old_cmp_dict]][name[same_key]]
variable[new_dict] assign[=] call[name[new_cmp_dict]][name[same_key]]
variable[dict_keys] assign[=] call[call[name[set], parameter[name[old_dict]]].intersection, parameter[call[name[set], parameter[name[new_dict]]]]]
variable[update_dict] assign[=] dictionary[[], []]
for taget[name[dict_key]] in starred[name[columns]] begin[:]
variable[old_val] assign[=] call[name[old_dict].get, parameter[name[dict_key], constant[NaN]]]
variable[new_val] assign[=] call[name[new_dict].get, parameter[name[dict_key], constant[NaN]]]
if <ast.BoolOp object at 0x7da18dc98190> begin[:]
if compare[name[id_column] not_equal[!=] constant[None]] begin[:]
<ast.Try object at 0x7da18dc98310>
call[name[update_dict]][name[dict_key]] assign[=] name[new_val]
if name[update_dict] begin[:]
call[name[update_ldict].append, parameter[name[update_dict]]]
return[name[update_ldict]]
|
keyword[def] identifier[changes] ( identifier[new_cmp_dict] , identifier[old_cmp_dict] , identifier[id_column] , identifier[columns] ):
literal[string]
identifier[update_ldict] =[]
identifier[same_keys] = identifier[set] ( identifier[new_cmp_dict] ). identifier[intersection] ( identifier[set] ( identifier[old_cmp_dict] ))
keyword[for] identifier[same_key] keyword[in] identifier[same_keys] :
identifier[old_dict] = identifier[old_cmp_dict] [ identifier[same_key] ]
identifier[new_dict] = identifier[new_cmp_dict] [ identifier[same_key] ]
identifier[dict_keys] = identifier[set] ( identifier[old_dict] ). identifier[intersection] ( identifier[set] ( identifier[new_dict] ))
identifier[update_dict] ={}
keyword[for] identifier[dict_key] keyword[in] identifier[columns] :
identifier[old_val] = identifier[old_dict] . identifier[get] ( identifier[dict_key] , literal[string] )
identifier[new_val] = identifier[new_dict] . identifier[get] ( identifier[dict_key] , literal[string] )
keyword[if] identifier[old_val] != identifier[new_val] keyword[and] identifier[new_val] != literal[string] :
keyword[if] identifier[id_column] != keyword[None] :
keyword[try] :
identifier[update_dict] [ identifier[id_column] ]= identifier[old_dict] [ identifier[id_column] ]
keyword[except] identifier[KeyError] :
identifier[print] ( literal[string] )
identifier[update_dict] [ identifier[dict_key] ]= identifier[new_val]
keyword[if] identifier[update_dict] :
identifier[update_ldict] . identifier[append] ( identifier[update_dict] )
keyword[return] identifier[update_ldict]
|
def changes(new_cmp_dict, old_cmp_dict, id_column, columns):
"""Return a list dict of the changes of the
rows that exist in both dictionaries
User must provide an ID column for old_cmp_dict
"""
update_ldict = []
same_keys = set(new_cmp_dict).intersection(set(old_cmp_dict))
for same_key in same_keys:
# Get the Union of the set of keys
# for both dictionaries to account
# for missing keys
old_dict = old_cmp_dict[same_key]
new_dict = new_cmp_dict[same_key]
dict_keys = set(old_dict).intersection(set(new_dict))
update_dict = {}
for dict_key in columns:
old_val = old_dict.get(dict_key, 'NaN')
new_val = new_dict.get(dict_key, 'NaN')
if old_val != new_val and new_val != 'NaN':
if id_column != None:
try:
update_dict[id_column] = old_dict[id_column] # depends on [control=['try'], data=[]]
except KeyError:
print("Input Dictionary 'old_cmp_dict' must have ID column") # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['id_column']]
update_dict[dict_key] = new_val # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dict_key']]
if update_dict:
update_ldict.append(update_dict) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['same_key']]
return update_ldict
|
async def list(self, *, filters: Mapping = None) -> List[Mapping]:
"""
Return a list of services
Args:
filters: a dict with a list of filters
Available filters:
id=<service id>
label=<service label>
mode=["replicated"|"global"]
name=<service name>
"""
params = {"filters": clean_filters(filters)}
response = await self.docker._query_json(
"services", method="GET", params=params
)
return response
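# Hedged usage sketch assuming an aiodocker-style client where this coroutine
# lives on docker.services; field names follow the Docker Engine API response.
import asyncio
import aiodocker

async def main():
    docker = aiodocker.Docker()
    try:
        services = await docker.services.list(filters={'name': ['my_stack_web']})
        for svc in services:
            print(svc['ID'], svc['Spec']['Name'])
    finally:
        await docker.close()

asyncio.run(main())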
|
<ast.AsyncFunctionDef object at 0x7da1b088b9a0>
|
keyword[async] keyword[def] identifier[list] ( identifier[self] ,*, identifier[filters] : identifier[Mapping] = keyword[None] )-> identifier[List] [ identifier[Mapping] ]:
literal[string]
identifier[params] ={ literal[string] : identifier[clean_filters] ( identifier[filters] )}
identifier[response] = keyword[await] identifier[self] . identifier[docker] . identifier[_query_json] (
literal[string] , identifier[method] = literal[string] , identifier[params] = identifier[params]
)
keyword[return] identifier[response]
|
async def list(self, *, filters: Mapping=None) -> List[Mapping]:
"""
Return a list of services
Args:
filters: a dict with a list of filters
Available filters:
id=<service id>
label=<service label>
mode=["replicated"|"global"]
name=<service name>
"""
params = {'filters': clean_filters(filters)}
response = await self.docker._query_json('services', method='GET', params=params)
return response
|
def mentions_links(uri, s):
""" Turns mentions-like strings into HTML links,
@uri: /uri/ root for the hashtag-like
@s: the #str string you're looking for |@|mentions in
-> #str HTML link |<a href="/uri/mention">mention</a>|
"""
for username, after in mentions_re.findall(s):
_uri = '/' + (uri or "").lstrip("/") + quote(username)
link = '<a href="{}">@{}</a>{}'.format(_uri.lower(), username, after)
s = s.replace('@' + username, link)
return s
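# Hedged sketch: the surrounding module is expected to define `mentions_re`
# (two groups: username, trailing boundary) and `quote`; the pattern below is
# an assumption, not the source's actual regex.
import re
from urllib.parse import quote

mentions_re = re.compile(r'@(\w+)(\b)')
print(mentions_links('/users/', 'thanks @Alice!'))
# thanks <a href="/users/alice">@Alice</a>!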
|
def function[mentions_links, parameter[uri, s]]:
constant[ Turns mentions-like strings into HTML links,
@uri: /uri/ root for the hashtag-like
@s: the #str string you're looking for |@|mentions in
-> #str HTML link |<a href="/uri/mention">mention</a>|
]
for taget[tuple[[<ast.Name object at 0x7da1b10d6140>, <ast.Name object at 0x7da1b10d47f0>]]] in starred[call[name[mentions_re].findall, parameter[name[s]]]] begin[:]
variable[_uri] assign[=] binary_operation[binary_operation[constant[/] + call[<ast.BoolOp object at 0x7da1b10d5d20>.lstrip, parameter[constant[/]]]] + call[name[quote], parameter[name[username]]]]
variable[link] assign[=] call[constant[<a href="{}">@{}</a>{}].format, parameter[call[name[_uri].lower, parameter[]], name[username], name[after]]]
variable[s] assign[=] call[name[s].replace, parameter[binary_operation[constant[@] + name[username]], name[link]]]
return[name[s]]
|
keyword[def] identifier[mentions_links] ( identifier[uri] , identifier[s] ):
literal[string]
keyword[for] identifier[username] , identifier[after] keyword[in] identifier[mentions_re] . identifier[findall] ( identifier[s] ):
identifier[_uri] = literal[string] +( identifier[uri] keyword[or] literal[string] ). identifier[lstrip] ( literal[string] )+ identifier[quote] ( identifier[username] )
identifier[link] = literal[string] . identifier[format] ( identifier[_uri] . identifier[lower] (), identifier[username] , identifier[after] )
identifier[s] = identifier[s] . identifier[replace] ( literal[string] + identifier[username] , identifier[link] )
keyword[return] identifier[s]
|
def mentions_links(uri, s):
""" Turns mentions-like strings into HTML links,
@uri: /uri/ root for the hashtag-like
@s: the #str string you're looking for |@|mentions in
-> #str HTML link |<a href="/uri/mention">mention</a>|
"""
for (username, after) in mentions_re.findall(s):
_uri = '/' + (uri or '').lstrip('/') + quote(username)
link = '<a href="{}">@{}</a>{}'.format(_uri.lower(), username, after)
s = s.replace('@' + username, link) # depends on [control=['for'], data=[]]
return s
|
def bbox_vert_aligned_center(box1, box2):
"""
    Returns true if the centers of the two boxes are within 5 pts of each other
"""
if not (box1 and box2):
return False
return abs(((box1.right + box1.left) / 2.0) - ((box2.right + box2.left) / 2.0)) <= 5
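# Hedged, self-contained example using a stand-in box type; real callers pass
# objects exposing .left and .right in points.
from collections import namedtuple

Box = namedtuple('Box', ['left', 'right'])
a = Box(left=100, right=200)   # center at 150
b = Box(left=104, right=204)   # center at 154 -> within the 5 pt tolerance
c = Box(left=120, right=220)   # center at 170 -> too far apart
print(bbox_vert_aligned_center(a, b), bbox_vert_aligned_center(a, c))
# True False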
|
def function[bbox_vert_aligned_center, parameter[box1, box2]]:
constant[
Returns true if the center of both boxes is within 5 pts
]
if <ast.UnaryOp object at 0x7da18ede5e10> begin[:]
return[constant[False]]
return[compare[call[name[abs], parameter[binary_operation[binary_operation[binary_operation[name[box1].right + name[box1].left] / constant[2.0]] - binary_operation[binary_operation[name[box2].right + name[box2].left] / constant[2.0]]]]] less_or_equal[<=] constant[5]]]
|
keyword[def] identifier[bbox_vert_aligned_center] ( identifier[box1] , identifier[box2] ):
literal[string]
keyword[if] keyword[not] ( identifier[box1] keyword[and] identifier[box2] ):
keyword[return] keyword[False]
keyword[return] identifier[abs] ((( identifier[box1] . identifier[right] + identifier[box1] . identifier[left] )/ literal[int] )-(( identifier[box2] . identifier[right] + identifier[box2] . identifier[left] )/ literal[int] ))<= literal[int]
|
def bbox_vert_aligned_center(box1, box2):
"""
    Returns true if the centers of the two boxes are within 5 pts of each other
"""
if not (box1 and box2):
return False # depends on [control=['if'], data=[]]
return abs((box1.right + box1.left) / 2.0 - (box2.right + box2.left) / 2.0) <= 5
|
def set_range_y(self,val):
""" Set visible range of y data.
Note: Padding must be 0 or it will create an infinite loop
"""
d = self.declaration
if d.auto_range[1]:
return
self.widget.setYRange(*val,padding=0)
|
def function[set_range_y, parameter[self, val]]:
constant[ Set visible range of y data.
Note: Padding must be 0 or it will create an infinite loop
]
variable[d] assign[=] name[self].declaration
if call[name[d].auto_range][constant[1]] begin[:]
return[None]
call[name[self].widget.setYRange, parameter[<ast.Starred object at 0x7da18bcc8700>]]
|
keyword[def] identifier[set_range_y] ( identifier[self] , identifier[val] ):
literal[string]
identifier[d] = identifier[self] . identifier[declaration]
keyword[if] identifier[d] . identifier[auto_range] [ literal[int] ]:
keyword[return]
identifier[self] . identifier[widget] . identifier[setYRange] (* identifier[val] , identifier[padding] = literal[int] )
|
def set_range_y(self, val):
""" Set visible range of y data.
Note: Padding must be 0 or it will create an infinite loop
"""
d = self.declaration
if d.auto_range[1]:
return # depends on [control=['if'], data=[]]
self.widget.setYRange(*val, padding=0)
|
def update(self, job):
"""Update last_run, next_run, and last_run_result for an existing job.
:param dict job: The job dictionary
:returns: True
"""
self.cur.execute('''UPDATE jobs
SET last_run=?,next_run=?,last_run_result=? WHERE hash=?''', (
job["last-run"], job["next-run"], job["last-run-result"], job["id"]))
|
def function[update, parameter[self, job]]:
constant[Update last_run, next_run, and last_run_result for an existing job.
:param dict job: The job dictionary
:returns: True
]
call[name[self].cur.execute, parameter[constant[UPDATE jobs
SET last_run=?,next_run=?,last_run_result=? WHERE hash=?], tuple[[<ast.Subscript object at 0x7da1b2046fe0>, <ast.Subscript object at 0x7da1b2046dd0>, <ast.Subscript object at 0x7da1b20126e0>, <ast.Subscript object at 0x7da1b20104c0>]]]]
|
keyword[def] identifier[update] ( identifier[self] , identifier[job] ):
literal[string]
identifier[self] . identifier[cur] . identifier[execute] ( literal[string] ,(
identifier[job] [ literal[string] ], identifier[job] [ literal[string] ], identifier[job] [ literal[string] ], identifier[job] [ literal[string] ]))
|
def update(self, job):
"""Update last_run, next_run, and last_run_result for an existing job.
:param dict job: The job dictionary
:returns: True
"""
self.cur.execute('UPDATE jobs\n SET last_run=?,next_run=?,last_run_result=? WHERE hash=?', (job['last-run'], job['next-run'], job['last-run-result'], job['id']))
|
def start(self):
"""
Start the reader, acquires the global lock before appending the descriptor on the stream.
Releases the lock afterwards.
:return: Nothing
"""
NonBlockingStreamReader._stream_mtx.acquire()
NonBlockingStreamReader._streams.append(self._descriptor)
NonBlockingStreamReader._stream_mtx.release()
|
def function[start, parameter[self]]:
constant[
Start the reader, acquires the global lock before appending the descriptor on the stream.
Releases the lock afterwards.
:return: Nothing
]
call[name[NonBlockingStreamReader]._stream_mtx.acquire, parameter[]]
call[name[NonBlockingStreamReader]._streams.append, parameter[name[self]._descriptor]]
call[name[NonBlockingStreamReader]._stream_mtx.release, parameter[]]
|
keyword[def] identifier[start] ( identifier[self] ):
literal[string]
identifier[NonBlockingStreamReader] . identifier[_stream_mtx] . identifier[acquire] ()
identifier[NonBlockingStreamReader] . identifier[_streams] . identifier[append] ( identifier[self] . identifier[_descriptor] )
identifier[NonBlockingStreamReader] . identifier[_stream_mtx] . identifier[release] ()
|
def start(self):
"""
Start the reader, acquires the global lock before appending the descriptor on the stream.
Releases the lock afterwards.
:return: Nothing
"""
NonBlockingStreamReader._stream_mtx.acquire()
NonBlockingStreamReader._streams.append(self._descriptor)
NonBlockingStreamReader._stream_mtx.release()
|
def p_DefaultValue_string(p):
"""DefaultValue : STRING"""
p[0] = model.Value(type=model.Value.STRING, value=p[1])
|
def function[p_DefaultValue_string, parameter[p]]:
constant[DefaultValue : STRING]
call[name[p]][constant[0]] assign[=] call[name[model].Value, parameter[]]
|
keyword[def] identifier[p_DefaultValue_string] ( identifier[p] ):
literal[string]
identifier[p] [ literal[int] ]= identifier[model] . identifier[Value] ( identifier[type] = identifier[model] . identifier[Value] . identifier[STRING] , identifier[value] = identifier[p] [ literal[int] ])
|
def p_DefaultValue_string(p):
"""DefaultValue : STRING"""
p[0] = model.Value(type=model.Value.STRING, value=p[1])
|
def run_server(cls, args=None, **kwargs):
"""Run the class as a device server.
It is based on the tango.server.run method.
The difference is that the device class
and server name are automatically given.
Args:
args (iterable): args as given in the tango.server.run method
without the server name. If None, the sys.argv
list is used
kwargs: the other keywords argument are as given
in the tango.server.run method.
"""
if args is None:
args = sys.argv[1:]
args = [cls.__name__] + list(args)
green_mode = getattr(cls, 'green_mode', None)
kwargs.setdefault("green_mode", green_mode)
return run((cls,), args, **kwargs)
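# Hedged sketch of a PyTango-style device class using this helper; the class
# and attribute below are hypothetical.
from tango.server import Device, attribute

class PowerSupply(Device):
    @attribute(dtype=float)
    def voltage(self):
        return 1.23

if __name__ == '__main__':
    # Equivalent to run((PowerSupply,), [PowerSupply.__name__] + sys.argv[1:]).
    PowerSupply.run_server()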
|
def function[run_server, parameter[cls, args]]:
constant[Run the class as a device server.
It is based on the tango.server.run method.
The difference is that the device class
and server name are automatically given.
Args:
args (iterable): args as given in the tango.server.run method
without the server name. If None, the sys.argv
list is used
kwargs: the other keywords argument are as given
in the tango.server.run method.
]
if compare[name[args] is constant[None]] begin[:]
variable[args] assign[=] call[name[sys].argv][<ast.Slice object at 0x7da20c6abaf0>]
variable[args] assign[=] binary_operation[list[[<ast.Attribute object at 0x7da20c6a9f60>]] + call[name[list], parameter[name[args]]]]
variable[green_mode] assign[=] call[name[getattr], parameter[name[cls], constant[green_mode], constant[None]]]
call[name[kwargs].setdefault, parameter[constant[green_mode], name[green_mode]]]
return[call[name[run], parameter[tuple[[<ast.Name object at 0x7da20c6aa470>]], name[args]]]]
|
keyword[def] identifier[run_server] ( identifier[cls] , identifier[args] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[args] keyword[is] keyword[None] :
identifier[args] = identifier[sys] . identifier[argv] [ literal[int] :]
identifier[args] =[ identifier[cls] . identifier[__name__] ]+ identifier[list] ( identifier[args] )
identifier[green_mode] = identifier[getattr] ( identifier[cls] , literal[string] , keyword[None] )
identifier[kwargs] . identifier[setdefault] ( literal[string] , identifier[green_mode] )
keyword[return] identifier[run] (( identifier[cls] ,), identifier[args] ,** identifier[kwargs] )
|
def run_server(cls, args=None, **kwargs):
"""Run the class as a device server.
It is based on the tango.server.run method.
The difference is that the device class
and server name are automatically given.
Args:
args (iterable): args as given in the tango.server.run method
without the server name. If None, the sys.argv
list is used
kwargs: the other keywords argument are as given
in the tango.server.run method.
"""
if args is None:
args = sys.argv[1:] # depends on [control=['if'], data=['args']]
args = [cls.__name__] + list(args)
green_mode = getattr(cls, 'green_mode', None)
kwargs.setdefault('green_mode', green_mode)
return run((cls,), args, **kwargs)
|
def register_channel_post_handler(self, callback, *custom_filters, commands=None, regexp=None, content_types=None,
state=None, run_task=None, **kwargs):
"""
Register handler for channel post
:param callback:
:param commands: list of commands
:param regexp: REGEXP
:param content_types: List of content types.
:param state:
:param custom_filters: list of custom filters
:param run_task: run callback in task (no wait results)
:param kwargs:
:return: decorated function
"""
filters_set = self.filters_factory.resolve(self.channel_post_handlers,
*custom_filters,
commands=commands,
regexp=regexp,
content_types=content_types,
state=state,
**kwargs)
self.channel_post_handlers.register(self._wrap_async_task(callback, run_task), filters_set)
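# Hedged usage sketch assuming an aiogram-style Dispatcher instance `dp`; this
# is the explicit-registration twin of the @dp.channel_post_handler decorator.
async def on_channel_post(message):
    await message.answer('noted')

dp.register_channel_post_handler(on_channel_post, content_types=['text'])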
|
def function[register_channel_post_handler, parameter[self, callback]]:
constant[
Register handler for channel post
:param callback:
:param commands: list of commands
:param regexp: REGEXP
:param content_types: List of content types.
:param state:
:param custom_filters: list of custom filters
:param run_task: run callback in task (no wait results)
:param kwargs:
:return: decorated function
]
variable[filters_set] assign[=] call[name[self].filters_factory.resolve, parameter[name[self].channel_post_handlers, <ast.Starred object at 0x7da1b1846ce0>]]
call[name[self].channel_post_handlers.register, parameter[call[name[self]._wrap_async_task, parameter[name[callback], name[run_task]]], name[filters_set]]]
|
keyword[def] identifier[register_channel_post_handler] ( identifier[self] , identifier[callback] ,* identifier[custom_filters] , identifier[commands] = keyword[None] , identifier[regexp] = keyword[None] , identifier[content_types] = keyword[None] ,
identifier[state] = keyword[None] , identifier[run_task] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[filters_set] = identifier[self] . identifier[filters_factory] . identifier[resolve] ( identifier[self] . identifier[channel_post_handlers] ,
* identifier[custom_filters] ,
identifier[commands] = identifier[commands] ,
identifier[regexp] = identifier[regexp] ,
identifier[content_types] = identifier[content_types] ,
identifier[state] = identifier[state] ,
** identifier[kwargs] )
identifier[self] . identifier[channel_post_handlers] . identifier[register] ( identifier[self] . identifier[_wrap_async_task] ( identifier[callback] , identifier[run_task] ), identifier[filters_set] )
|
def register_channel_post_handler(self, callback, *custom_filters, commands=None, regexp=None, content_types=None, state=None, run_task=None, **kwargs):
"""
Register handler for channel post
:param callback:
:param commands: list of commands
:param regexp: REGEXP
:param content_types: List of content types.
:param state:
:param custom_filters: list of custom filters
:param run_task: run callback in task (no wait results)
:param kwargs:
:return: decorated function
"""
filters_set = self.filters_factory.resolve(self.channel_post_handlers, *custom_filters, commands=commands, regexp=regexp, content_types=content_types, state=state, **kwargs)
self.channel_post_handlers.register(self._wrap_async_task(callback, run_task), filters_set)
|
def _construct(self, targets, control_flow_slice=False):
"""
Construct a dependency graph based on given parameters.
:param targets: A list of tuples like (CFGNode, statement ID)
        :param control_flow_slice: Whether the backward slicing depends only on the CFG.
"""
if control_flow_slice:
simruns = [ r for r, _ in targets ]
self._construct_control_flow_slice(simruns)
else:
self._construct_default(targets)
|
def function[_construct, parameter[self, targets, control_flow_slice]]:
constant[
Construct a dependency graph based on given parameters.
:param targets: A list of tuples like (CFGNode, statement ID)
:param control_flow_slice: Is the backward slicing only depends on CFG or not.
]
if name[control_flow_slice] begin[:]
variable[simruns] assign[=] <ast.ListComp object at 0x7da207f03cd0>
call[name[self]._construct_control_flow_slice, parameter[name[simruns]]]
|
keyword[def] identifier[_construct] ( identifier[self] , identifier[targets] , identifier[control_flow_slice] = keyword[False] ):
literal[string]
keyword[if] identifier[control_flow_slice] :
identifier[simruns] =[ identifier[r] keyword[for] identifier[r] , identifier[_] keyword[in] identifier[targets] ]
identifier[self] . identifier[_construct_control_flow_slice] ( identifier[simruns] )
keyword[else] :
identifier[self] . identifier[_construct_default] ( identifier[targets] )
|
def _construct(self, targets, control_flow_slice=False):
"""
Construct a dependency graph based on given parameters.
:param targets: A list of tuples like (CFGNode, statement ID)
        :param control_flow_slice: Whether the backward slicing depends only on the CFG.
"""
if control_flow_slice:
simruns = [r for (r, _) in targets]
self._construct_control_flow_slice(simruns) # depends on [control=['if'], data=[]]
else:
self._construct_default(targets)
|
def _aggregate_ndoverlay(self, element, agg_fn):
"""
Optimized aggregation for NdOverlay objects by aggregating each
Element in an NdOverlay individually avoiding having to concatenate
items in the NdOverlay. Works by summing sum and count aggregates and
applying appropriate masking for NaN values. Mean aggregation
is also supported by dividing sum and count aggregates. count_cat
aggregates are grouped by the categorical dimension and a separate
aggregate for each category is generated.
"""
# Compute overall bounds
x, y = element.last.dimensions()[0:2]
info = self._get_sampling(element, x, y)
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info
if xtype == 'datetime':
x_range = tuple((np.array(x_range)/1e3).astype('datetime64[us]'))
if ytype == 'datetime':
y_range = tuple((np.array(y_range)/1e3).astype('datetime64[us]'))
agg_params = dict({k: v for k, v in dict(self.get_param_values(), **self.p).items()
if k in aggregate.params()},
x_range=x_range, y_range=y_range)
bbox = BoundingBox(points=[(x_range[0], y_range[0]), (x_range[1], y_range[1])])
# Optimize categorical counts by aggregating them individually
if isinstance(agg_fn, ds.count_cat):
agg_params.update(dict(dynamic=False, aggregator=ds.count()))
agg_fn1 = aggregate.instance(**agg_params)
if element.ndims == 1:
grouped = element
else:
grouped = element.groupby([agg_fn.column], container_type=NdOverlay,
group_type=NdOverlay)
groups = []
for k, v in grouped.items():
agg = agg_fn1(v)
groups.append((k, agg.clone(agg.data, bounds=bbox)))
return grouped.clone(groups)
# Create aggregate instance for sum, count operations, breaking mean
# into two aggregates
column = agg_fn.column or 'Count'
if isinstance(agg_fn, ds.mean):
agg_fn1 = aggregate.instance(**dict(agg_params, aggregator=ds.sum(column)))
agg_fn2 = aggregate.instance(**dict(agg_params, aggregator=ds.count()))
else:
agg_fn1 = aggregate.instance(**agg_params)
agg_fn2 = None
is_sum = isinstance(agg_fn1.aggregator, ds.sum)
# Accumulate into two aggregates and mask
agg, agg2, mask = None, None, None
mask = None
for v in element:
# Compute aggregates and mask
new_agg = agg_fn1.process_element(v, None)
if is_sum:
new_mask = np.isnan(new_agg.data[column].values)
new_agg.data = new_agg.data.fillna(0)
if agg_fn2:
new_agg2 = agg_fn2.process_element(v, None)
if agg is None:
agg = new_agg
if is_sum: mask = new_mask
if agg_fn2: agg2 = new_agg2
else:
agg.data += new_agg.data
if is_sum: mask &= new_mask
if agg_fn2: agg2.data += new_agg2.data
# Divide sum by count to compute mean
if agg2 is not None:
agg2.data.rename({'Count': agg_fn.column}, inplace=True)
with np.errstate(divide='ignore', invalid='ignore'):
agg.data /= agg2.data
        # Fill masked values with NaNs
if is_sum:
agg.data[column].values[mask] = np.NaN
return agg.clone(bounds=bbox)
|
def function[_aggregate_ndoverlay, parameter[self, element, agg_fn]]:
constant[
Optimized aggregation for NdOverlay objects by aggregating each
Element in an NdOverlay individually avoiding having to concatenate
items in the NdOverlay. Works by summing sum and count aggregates and
applying appropriate masking for NaN values. Mean aggregation
is also supported by dividing sum and count aggregates. count_cat
aggregates are grouped by the categorical dimension and a separate
aggregate for each category is generated.
]
<ast.Tuple object at 0x7da20c76fe80> assign[=] call[call[name[element].last.dimensions, parameter[]]][<ast.Slice object at 0x7da20c76de40>]
variable[info] assign[=] call[name[self]._get_sampling, parameter[name[element], name[x], name[y]]]
<ast.Tuple object at 0x7da20c76ee00> assign[=] name[info]
if compare[name[xtype] equal[==] constant[datetime]] begin[:]
variable[x_range] assign[=] call[name[tuple], parameter[call[binary_operation[call[name[np].array, parameter[name[x_range]]] / constant[1000.0]].astype, parameter[constant[datetime64[us]]]]]]
if compare[name[ytype] equal[==] constant[datetime]] begin[:]
variable[y_range] assign[=] call[name[tuple], parameter[call[binary_operation[call[name[np].array, parameter[name[y_range]]] / constant[1000.0]].astype, parameter[constant[datetime64[us]]]]]]
variable[agg_params] assign[=] call[name[dict], parameter[<ast.DictComp object at 0x7da20c76efb0>]]
variable[bbox] assign[=] call[name[BoundingBox], parameter[]]
if call[name[isinstance], parameter[name[agg_fn], name[ds].count_cat]] begin[:]
call[name[agg_params].update, parameter[call[name[dict], parameter[]]]]
variable[agg_fn1] assign[=] call[name[aggregate].instance, parameter[]]
if compare[name[element].ndims equal[==] constant[1]] begin[:]
variable[grouped] assign[=] name[element]
variable[groups] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c76c1f0>, <ast.Name object at 0x7da20c76ece0>]]] in starred[call[name[grouped].items, parameter[]]] begin[:]
variable[agg] assign[=] call[name[agg_fn1], parameter[name[v]]]
call[name[groups].append, parameter[tuple[[<ast.Name object at 0x7da20c76e6e0>, <ast.Call object at 0x7da20c76e410>]]]]
return[call[name[grouped].clone, parameter[name[groups]]]]
variable[column] assign[=] <ast.BoolOp object at 0x7da20c76f700>
if call[name[isinstance], parameter[name[agg_fn], name[ds].mean]] begin[:]
variable[agg_fn1] assign[=] call[name[aggregate].instance, parameter[]]
variable[agg_fn2] assign[=] call[name[aggregate].instance, parameter[]]
variable[is_sum] assign[=] call[name[isinstance], parameter[name[agg_fn1].aggregator, name[ds].sum]]
<ast.Tuple object at 0x7da2054a7ee0> assign[=] tuple[[<ast.Constant object at 0x7da2054a7e80>, <ast.Constant object at 0x7da2054a60e0>, <ast.Constant object at 0x7da2054a52a0>]]
variable[mask] assign[=] constant[None]
for taget[name[v]] in starred[name[element]] begin[:]
variable[new_agg] assign[=] call[name[agg_fn1].process_element, parameter[name[v], constant[None]]]
if name[is_sum] begin[:]
variable[new_mask] assign[=] call[name[np].isnan, parameter[call[name[new_agg].data][name[column]].values]]
name[new_agg].data assign[=] call[name[new_agg].data.fillna, parameter[constant[0]]]
if name[agg_fn2] begin[:]
variable[new_agg2] assign[=] call[name[agg_fn2].process_element, parameter[name[v], constant[None]]]
if compare[name[agg] is constant[None]] begin[:]
variable[agg] assign[=] name[new_agg]
if name[is_sum] begin[:]
variable[mask] assign[=] name[new_mask]
if name[agg_fn2] begin[:]
variable[agg2] assign[=] name[new_agg2]
if compare[name[agg2] is_not constant[None]] begin[:]
call[name[agg2].data.rename, parameter[dictionary[[<ast.Constant object at 0x7da2054a5e10>], [<ast.Attribute object at 0x7da2054a4a90>]]]]
with call[name[np].errstate, parameter[]] begin[:]
<ast.AugAssign object at 0x7da2054a6e90>
if name[is_sum] begin[:]
call[call[name[agg].data][name[column]].values][name[mask]] assign[=] name[np].NaN
return[call[name[agg].clone, parameter[]]]
|
keyword[def] identifier[_aggregate_ndoverlay] ( identifier[self] , identifier[element] , identifier[agg_fn] ):
literal[string]
identifier[x] , identifier[y] = identifier[element] . identifier[last] . identifier[dimensions] ()[ literal[int] : literal[int] ]
identifier[info] = identifier[self] . identifier[_get_sampling] ( identifier[element] , identifier[x] , identifier[y] )
( identifier[x_range] , identifier[y_range] ),( identifier[xs] , identifier[ys] ),( identifier[width] , identifier[height] ),( identifier[xtype] , identifier[ytype] )= identifier[info]
keyword[if] identifier[xtype] == literal[string] :
identifier[x_range] = identifier[tuple] (( identifier[np] . identifier[array] ( identifier[x_range] )/ literal[int] ). identifier[astype] ( literal[string] ))
keyword[if] identifier[ytype] == literal[string] :
identifier[y_range] = identifier[tuple] (( identifier[np] . identifier[array] ( identifier[y_range] )/ literal[int] ). identifier[astype] ( literal[string] ))
identifier[agg_params] = identifier[dict] ({ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[dict] ( identifier[self] . identifier[get_param_values] (),** identifier[self] . identifier[p] ). identifier[items] ()
keyword[if] identifier[k] keyword[in] identifier[aggregate] . identifier[params] ()},
identifier[x_range] = identifier[x_range] , identifier[y_range] = identifier[y_range] )
identifier[bbox] = identifier[BoundingBox] ( identifier[points] =[( identifier[x_range] [ literal[int] ], identifier[y_range] [ literal[int] ]),( identifier[x_range] [ literal[int] ], identifier[y_range] [ literal[int] ])])
keyword[if] identifier[isinstance] ( identifier[agg_fn] , identifier[ds] . identifier[count_cat] ):
identifier[agg_params] . identifier[update] ( identifier[dict] ( identifier[dynamic] = keyword[False] , identifier[aggregator] = identifier[ds] . identifier[count] ()))
identifier[agg_fn1] = identifier[aggregate] . identifier[instance] (** identifier[agg_params] )
keyword[if] identifier[element] . identifier[ndims] == literal[int] :
identifier[grouped] = identifier[element]
keyword[else] :
identifier[grouped] = identifier[element] . identifier[groupby] ([ identifier[agg_fn] . identifier[column] ], identifier[container_type] = identifier[NdOverlay] ,
identifier[group_type] = identifier[NdOverlay] )
identifier[groups] =[]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[grouped] . identifier[items] ():
identifier[agg] = identifier[agg_fn1] ( identifier[v] )
identifier[groups] . identifier[append] (( identifier[k] , identifier[agg] . identifier[clone] ( identifier[agg] . identifier[data] , identifier[bounds] = identifier[bbox] )))
keyword[return] identifier[grouped] . identifier[clone] ( identifier[groups] )
identifier[column] = identifier[agg_fn] . identifier[column] keyword[or] literal[string]
keyword[if] identifier[isinstance] ( identifier[agg_fn] , identifier[ds] . identifier[mean] ):
identifier[agg_fn1] = identifier[aggregate] . identifier[instance] (** identifier[dict] ( identifier[agg_params] , identifier[aggregator] = identifier[ds] . identifier[sum] ( identifier[column] )))
identifier[agg_fn2] = identifier[aggregate] . identifier[instance] (** identifier[dict] ( identifier[agg_params] , identifier[aggregator] = identifier[ds] . identifier[count] ()))
keyword[else] :
identifier[agg_fn1] = identifier[aggregate] . identifier[instance] (** identifier[agg_params] )
identifier[agg_fn2] = keyword[None]
identifier[is_sum] = identifier[isinstance] ( identifier[agg_fn1] . identifier[aggregator] , identifier[ds] . identifier[sum] )
identifier[agg] , identifier[agg2] , identifier[mask] = keyword[None] , keyword[None] , keyword[None]
identifier[mask] = keyword[None]
keyword[for] identifier[v] keyword[in] identifier[element] :
identifier[new_agg] = identifier[agg_fn1] . identifier[process_element] ( identifier[v] , keyword[None] )
keyword[if] identifier[is_sum] :
identifier[new_mask] = identifier[np] . identifier[isnan] ( identifier[new_agg] . identifier[data] [ identifier[column] ]. identifier[values] )
identifier[new_agg] . identifier[data] = identifier[new_agg] . identifier[data] . identifier[fillna] ( literal[int] )
keyword[if] identifier[agg_fn2] :
identifier[new_agg2] = identifier[agg_fn2] . identifier[process_element] ( identifier[v] , keyword[None] )
keyword[if] identifier[agg] keyword[is] keyword[None] :
identifier[agg] = identifier[new_agg]
keyword[if] identifier[is_sum] : identifier[mask] = identifier[new_mask]
keyword[if] identifier[agg_fn2] : identifier[agg2] = identifier[new_agg2]
keyword[else] :
identifier[agg] . identifier[data] += identifier[new_agg] . identifier[data]
keyword[if] identifier[is_sum] : identifier[mask] &= identifier[new_mask]
keyword[if] identifier[agg_fn2] : identifier[agg2] . identifier[data] += identifier[new_agg2] . identifier[data]
keyword[if] identifier[agg2] keyword[is] keyword[not] keyword[None] :
identifier[agg2] . identifier[data] . identifier[rename] ({ literal[string] : identifier[agg_fn] . identifier[column] }, identifier[inplace] = keyword[True] )
keyword[with] identifier[np] . identifier[errstate] ( identifier[divide] = literal[string] , identifier[invalid] = literal[string] ):
identifier[agg] . identifier[data] /= identifier[agg2] . identifier[data]
keyword[if] identifier[is_sum] :
identifier[agg] . identifier[data] [ identifier[column] ]. identifier[values] [ identifier[mask] ]= identifier[np] . identifier[NaN]
keyword[return] identifier[agg] . identifier[clone] ( identifier[bounds] = identifier[bbox] )
|
def _aggregate_ndoverlay(self, element, agg_fn):
"""
Optimized aggregation for NdOverlay objects by aggregating each
Element in an NdOverlay individually avoiding having to concatenate
items in the NdOverlay. Works by summing sum and count aggregates and
applying appropriate masking for NaN values. Mean aggregation
is also supported by dividing sum and count aggregates. count_cat
aggregates are grouped by the categorical dimension and a separate
aggregate for each category is generated.
"""
# Compute overall bounds
(x, y) = element.last.dimensions()[0:2]
info = self._get_sampling(element, x, y)
((x_range, y_range), (xs, ys), (width, height), (xtype, ytype)) = info
if xtype == 'datetime':
x_range = tuple((np.array(x_range) / 1000.0).astype('datetime64[us]')) # depends on [control=['if'], data=[]]
if ytype == 'datetime':
y_range = tuple((np.array(y_range) / 1000.0).astype('datetime64[us]')) # depends on [control=['if'], data=[]]
agg_params = dict({k: v for (k, v) in dict(self.get_param_values(), **self.p).items() if k in aggregate.params()}, x_range=x_range, y_range=y_range)
bbox = BoundingBox(points=[(x_range[0], y_range[0]), (x_range[1], y_range[1])])
# Optimize categorical counts by aggregating them individually
if isinstance(agg_fn, ds.count_cat):
agg_params.update(dict(dynamic=False, aggregator=ds.count()))
agg_fn1 = aggregate.instance(**agg_params)
if element.ndims == 1:
grouped = element # depends on [control=['if'], data=[]]
else:
grouped = element.groupby([agg_fn.column], container_type=NdOverlay, group_type=NdOverlay)
groups = []
for (k, v) in grouped.items():
agg = agg_fn1(v)
groups.append((k, agg.clone(agg.data, bounds=bbox))) # depends on [control=['for'], data=[]]
return grouped.clone(groups) # depends on [control=['if'], data=[]]
# Create aggregate instance for sum, count operations, breaking mean
# into two aggregates
column = agg_fn.column or 'Count'
if isinstance(agg_fn, ds.mean):
agg_fn1 = aggregate.instance(**dict(agg_params, aggregator=ds.sum(column)))
agg_fn2 = aggregate.instance(**dict(agg_params, aggregator=ds.count())) # depends on [control=['if'], data=[]]
else:
agg_fn1 = aggregate.instance(**agg_params)
agg_fn2 = None
is_sum = isinstance(agg_fn1.aggregator, ds.sum)
# Accumulate into two aggregates and mask
(agg, agg2, mask) = (None, None, None)
mask = None
for v in element:
# Compute aggregates and mask
new_agg = agg_fn1.process_element(v, None)
if is_sum:
new_mask = np.isnan(new_agg.data[column].values)
new_agg.data = new_agg.data.fillna(0) # depends on [control=['if'], data=[]]
if agg_fn2:
new_agg2 = agg_fn2.process_element(v, None) # depends on [control=['if'], data=[]]
if agg is None:
agg = new_agg
if is_sum:
mask = new_mask # depends on [control=['if'], data=[]]
if agg_fn2:
agg2 = new_agg2 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['agg']]
else:
agg.data += new_agg.data
if is_sum:
mask &= new_mask # depends on [control=['if'], data=[]]
if agg_fn2:
agg2.data += new_agg2.data # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['v']]
# Divide sum by count to compute mean
if agg2 is not None:
agg2.data.rename({'Count': agg_fn.column}, inplace=True)
with np.errstate(divide='ignore', invalid='ignore'):
agg.data /= agg2.data # depends on [control=['with'], data=[]] # depends on [control=['if'], data=['agg2']]
    # Fill masked values with NaNs
if is_sum:
agg.data[column].values[mask] = np.NaN # depends on [control=['if'], data=[]]
return agg.clone(bounds=bbox)
|
def _handle_exclude(self, state, sls, saltenv, errors):
'''
Take the exclude dec out of the state and apply it to the highstate
global dec
'''
if 'exclude' in state:
exc = state.pop('exclude')
if not isinstance(exc, list):
err = ('Exclude Declaration in SLS {0} is not formed '
'as a list'.format(sls))
errors.append(err)
state.setdefault('__exclude__', []).extend(exc)
|
def function[_handle_exclude, parameter[self, state, sls, saltenv, errors]]:
constant[
Take the exclude dec out of the state and apply it to the highstate
global dec
]
if compare[constant[exclude] in name[state]] begin[:]
variable[exc] assign[=] call[name[state].pop, parameter[constant[exclude]]]
if <ast.UnaryOp object at 0x7da18dc05db0> begin[:]
variable[err] assign[=] call[constant[Exclude Declaration in SLS {0} is not formed as a list].format, parameter[name[sls]]]
call[name[errors].append, parameter[name[err]]]
call[call[name[state].setdefault, parameter[constant[__exclude__], list[[]]]].extend, parameter[name[exc]]]
|
keyword[def] identifier[_handle_exclude] ( identifier[self] , identifier[state] , identifier[sls] , identifier[saltenv] , identifier[errors] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[state] :
identifier[exc] = identifier[state] . identifier[pop] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[exc] , identifier[list] ):
identifier[err] =( literal[string]
literal[string] . identifier[format] ( identifier[sls] ))
identifier[errors] . identifier[append] ( identifier[err] )
identifier[state] . identifier[setdefault] ( literal[string] ,[]). identifier[extend] ( identifier[exc] )
|
def _handle_exclude(self, state, sls, saltenv, errors):
"""
Take the exclude dec out of the state and apply it to the highstate
global dec
"""
if 'exclude' in state:
exc = state.pop('exclude')
if not isinstance(exc, list):
err = 'Exclude Declaration in SLS {0} is not formed as a list'.format(sls)
errors.append(err) # depends on [control=['if'], data=[]]
state.setdefault('__exclude__', []).extend(exc) # depends on [control=['if'], data=['state']]
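
A minimal standalone sketch of how _handle_exclude above behaves. The self argument is never referenced by the body, so None is passed; the SLS name, saltenv and state contents are made up purely for illustration.

errors = []
state = {'exclude': ['other.sls'], 'some_id': {'pkg': ['installed']}}
# Reuse the _handle_exclude function shown above; self is unused.
_handle_exclude(None, state, 'top.sls', 'base', errors)
print(state['__exclude__'])   # ['other.sls']
print('exclude' in state)     # False -- the key was popped off the state
print(errors)                 # [] -- the exclude value was a list, so no error was recorded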
|
def deepupdate(original, update):
"""Recursively update a dict.
    Subdicts won't be overwritten but updated as well.
"""
for key, value in original.items():
if key not in update:
update[key] = value
elif isinstance(value, dict):
deepupdate(value, update[key])
return update
|
def function[deepupdate, parameter[original, update]]:
constant[Recursively update a dict.
    Subdicts won't be overwritten but updated as well.
]
for taget[tuple[[<ast.Name object at 0x7da1b1701450>, <ast.Name object at 0x7da1b1700f10>]]] in starred[call[name[original].items, parameter[]]] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[update]] begin[:]
call[name[update]][name[key]] assign[=] name[value]
return[name[update]]
|
keyword[def] identifier[deepupdate] ( identifier[original] , identifier[update] ):
literal[string]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[original] . identifier[items] ():
keyword[if] identifier[key] keyword[not] keyword[in] identifier[update] :
identifier[update] [ identifier[key] ]= identifier[value]
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[dict] ):
identifier[deepupdate] ( identifier[value] , identifier[update] [ identifier[key] ])
keyword[return] identifier[update]
|
def deepupdate(original, update):
"""Recursively update a dict.
    Subdicts won't be overwritten but updated as well.
"""
for (key, value) in original.items():
if key not in update:
update[key] = value # depends on [control=['if'], data=['key', 'update']]
elif isinstance(value, dict):
deepupdate(value, update[key]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return update
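
A small usage sketch of deepupdate above, with hypothetical config values: keys present in original but missing from update are filled in, recursing into nested dicts, and update is mutated in place and returned.

defaults = {'timeout': 30, 'db': {'host': 'localhost', 'port': 5432}}
user_cfg = {'db': {'host': 'example.org'}}
merged = deepupdate(defaults, user_cfg)
print(merged)                 # {'db': {'host': 'example.org', 'port': 5432}, 'timeout': 30}
print(merged is user_cfg)     # True -- the update dict is modified in place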
|
def AreaUnderCurve(x, y):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
Parameters
----------
x : array, shape = [n]
x coordinates
y : array, shape = [n]
y coordinates
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred)
>>> metrics.auc(fpr, tpr)
0.75
"""
#x, y = check_arrays(x, y)
if x.shape[0] != y.shape[0]:
raise ValueError('x and y should have the same shape'
' to compute area under curve,'
' but x.shape = %s and y.shape = %s.'
% (x.shape, y.shape))
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
# reorder the data points according to the x axis
order = np.argsort(x)
x = x[order]
y = y[order]
h = np.diff(x)
area = np.sum(h * (y[1:] + y[:-1])) / 2.0
return area
|
def function[AreaUnderCurve, parameter[x, y]]:
constant[Compute Area Under the Curve (AUC) using the trapezoidal rule
Parameters
----------
x : array, shape = [n]
x coordinates
y : array, shape = [n]
y coordinates
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred)
>>> metrics.auc(fpr, tpr)
0.75
]
if compare[call[name[x].shape][constant[0]] not_equal[!=] call[name[y].shape][constant[0]]] begin[:]
<ast.Raise object at 0x7da1b2346f80>
if compare[call[name[x].shape][constant[0]] less[<] constant[2]] begin[:]
<ast.Raise object at 0x7da1b2344550>
variable[order] assign[=] call[name[np].argsort, parameter[name[x]]]
variable[x] assign[=] call[name[x]][name[order]]
variable[y] assign[=] call[name[y]][name[order]]
variable[h] assign[=] call[name[np].diff, parameter[name[x]]]
variable[area] assign[=] binary_operation[call[name[np].sum, parameter[binary_operation[name[h] * binary_operation[call[name[y]][<ast.Slice object at 0x7da1b2344970>] + call[name[y]][<ast.Slice object at 0x7da1b23442b0>]]]]] / constant[2.0]]
return[name[area]]
|
keyword[def] identifier[AreaUnderCurve] ( identifier[x] , identifier[y] ):
literal[string]
keyword[if] identifier[x] . identifier[shape] [ literal[int] ]!= identifier[y] . identifier[shape] [ literal[int] ]:
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string]
%( identifier[x] . identifier[shape] , identifier[y] . identifier[shape] ))
keyword[if] identifier[x] . identifier[shape] [ literal[int] ]< literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] % identifier[x] . identifier[shape] )
identifier[order] = identifier[np] . identifier[argsort] ( identifier[x] )
identifier[x] = identifier[x] [ identifier[order] ]
identifier[y] = identifier[y] [ identifier[order] ]
identifier[h] = identifier[np] . identifier[diff] ( identifier[x] )
identifier[area] = identifier[np] . identifier[sum] ( identifier[h] *( identifier[y] [ literal[int] :]+ identifier[y] [:- literal[int] ]))/ literal[int]
keyword[return] identifier[area]
|
def AreaUnderCurve(x, y):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
Parameters
----------
x : array, shape = [n]
x coordinates
y : array, shape = [n]
y coordinates
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred)
>>> metrics.auc(fpr, tpr)
0.75
"""
#x, y = check_arrays(x, y)
if x.shape[0] != y.shape[0]:
raise ValueError('x and y should have the same shape to compute area under curve, but x.shape = %s and y.shape = %s.' % (x.shape, y.shape)) # depends on [control=['if'], data=[]]
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute area under curve, but x.shape = %s' % x.shape) # depends on [control=['if'], data=[]]
# reorder the data points according to the x axis
order = np.argsort(x)
x = x[order]
y = y[order]
h = np.diff(x)
area = np.sum(h * (y[1:] + y[:-1])) / 2.0
return area
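
Beyond the doctest in the docstring, a quick sanity check of AreaUnderCurve against NumPy's own trapezoidal rule; this assumes numpy is imported as np at module level, which the function body already requires. The coordinates below are made up.

import numpy as np

x = np.array([0.0, 1.0, 2.0, 4.0])
y = np.array([0.0, 1.0, 1.0, 3.0])
print(AreaUnderCurve(x, y))   # 5.5
print(np.trapz(y, x))         # 5.5 -- same result (on NumPy >= 2.0 use np.trapezoid instead)
# AreaUnderCurve also sorts by x first, so unsorted x coordinates are handled as well.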
|
def _get_positions_to_highlight(self, document):
"""
Return a list of (row, col) tuples that need to be highlighted.
"""
# Try for the character under the cursor.
if document.current_char and document.current_char in self.chars:
pos = document.find_matching_bracket_position(
start_pos=document.cursor_position - self.max_cursor_distance,
end_pos=document.cursor_position + self.max_cursor_distance)
# Try for the character before the cursor.
elif (document.char_before_cursor and document.char_before_cursor in
self._closing_braces and document.char_before_cursor in self.chars):
document = Document(document.text, document.cursor_position - 1)
pos = document.find_matching_bracket_position(
start_pos=document.cursor_position - self.max_cursor_distance,
end_pos=document.cursor_position + self.max_cursor_distance)
else:
pos = None
# Return a list of (row, col) tuples that need to be highlighted.
if pos:
pos += document.cursor_position # pos is relative.
row, col = document.translate_index_to_position(pos)
return [(row, col), (document.cursor_position_row, document.cursor_position_col)]
else:
return []
|
def function[_get_positions_to_highlight, parameter[self, document]]:
constant[
Return a list of (row, col) tuples that need to be highlighted.
]
if <ast.BoolOp object at 0x7da1b080a110> begin[:]
variable[pos] assign[=] call[name[document].find_matching_bracket_position, parameter[]]
if name[pos] begin[:]
<ast.AugAssign object at 0x7da1b080a350>
<ast.Tuple object at 0x7da1b080be80> assign[=] call[name[document].translate_index_to_position, parameter[name[pos]]]
return[list[[<ast.Tuple object at 0x7da1b080aaa0>, <ast.Tuple object at 0x7da1b080b130>]]]
|
keyword[def] identifier[_get_positions_to_highlight] ( identifier[self] , identifier[document] ):
literal[string]
keyword[if] identifier[document] . identifier[current_char] keyword[and] identifier[document] . identifier[current_char] keyword[in] identifier[self] . identifier[chars] :
identifier[pos] = identifier[document] . identifier[find_matching_bracket_position] (
identifier[start_pos] = identifier[document] . identifier[cursor_position] - identifier[self] . identifier[max_cursor_distance] ,
identifier[end_pos] = identifier[document] . identifier[cursor_position] + identifier[self] . identifier[max_cursor_distance] )
keyword[elif] ( identifier[document] . identifier[char_before_cursor] keyword[and] identifier[document] . identifier[char_before_cursor] keyword[in]
identifier[self] . identifier[_closing_braces] keyword[and] identifier[document] . identifier[char_before_cursor] keyword[in] identifier[self] . identifier[chars] ):
identifier[document] = identifier[Document] ( identifier[document] . identifier[text] , identifier[document] . identifier[cursor_position] - literal[int] )
identifier[pos] = identifier[document] . identifier[find_matching_bracket_position] (
identifier[start_pos] = identifier[document] . identifier[cursor_position] - identifier[self] . identifier[max_cursor_distance] ,
identifier[end_pos] = identifier[document] . identifier[cursor_position] + identifier[self] . identifier[max_cursor_distance] )
keyword[else] :
identifier[pos] = keyword[None]
keyword[if] identifier[pos] :
identifier[pos] += identifier[document] . identifier[cursor_position]
identifier[row] , identifier[col] = identifier[document] . identifier[translate_index_to_position] ( identifier[pos] )
keyword[return] [( identifier[row] , identifier[col] ),( identifier[document] . identifier[cursor_position_row] , identifier[document] . identifier[cursor_position_col] )]
keyword[else] :
keyword[return] []
|
def _get_positions_to_highlight(self, document):
"""
Return a list of (row, col) tuples that need to be highlighted.
"""
# Try for the character under the cursor.
if document.current_char and document.current_char in self.chars:
pos = document.find_matching_bracket_position(start_pos=document.cursor_position - self.max_cursor_distance, end_pos=document.cursor_position + self.max_cursor_distance) # depends on [control=['if'], data=[]]
# Try for the character before the cursor.
elif document.char_before_cursor and document.char_before_cursor in self._closing_braces and (document.char_before_cursor in self.chars):
document = Document(document.text, document.cursor_position - 1)
pos = document.find_matching_bracket_position(start_pos=document.cursor_position - self.max_cursor_distance, end_pos=document.cursor_position + self.max_cursor_distance) # depends on [control=['if'], data=[]]
else:
pos = None
# Return a list of (row, col) tuples that need to be highlighted.
if pos:
pos += document.cursor_position # pos is relative.
(row, col) = document.translate_index_to_position(pos)
return [(row, col), (document.cursor_position_row, document.cursor_position_col)] # depends on [control=['if'], data=[]]
else:
return []
|
def read_labels(filename, delimiter=DEFAULT_DELIMITER):
"""read label files. Format: ent label"""
_assert_good_file(filename)
with open(filename) as f:
labels = [_label_processing(l, delimiter) for l in f]
return labels
|
def function[read_labels, parameter[filename, delimiter]]:
constant[read label files. Format: ent label]
call[name[_assert_good_file], parameter[name[filename]]]
with call[name[open], parameter[name[filename]]] begin[:]
variable[labels] assign[=] <ast.ListComp object at 0x7da1b00a1f60>
return[name[labels]]
|
keyword[def] identifier[read_labels] ( identifier[filename] , identifier[delimiter] = identifier[DEFAULT_DELIMITER] ):
literal[string]
identifier[_assert_good_file] ( identifier[filename] )
keyword[with] identifier[open] ( identifier[filename] ) keyword[as] identifier[f] :
identifier[labels] =[ identifier[_label_processing] ( identifier[l] , identifier[delimiter] ) keyword[for] identifier[l] keyword[in] identifier[f] ]
keyword[return] identifier[labels]
|
def read_labels(filename, delimiter=DEFAULT_DELIMITER):
"""read label files. Format: ent label"""
_assert_good_file(filename)
with open(filename) as f:
labels = [_label_processing(l, delimiter) for l in f]
return labels # depends on [control=['with'], data=['f']]
|
def _add_admin_views(self):
"""
Registers indexview, utilview (back function), babel views and Security views.
"""
self.indexview = self._check_and_init(self.indexview)
self.add_view_no_menu(self.indexview)
self.add_view_no_menu(UtilView())
self.bm.register_views()
if self.get_app.config.get('FAB_ADD_SECURITY_VIEWS', True):
self.sm.register_views()
if self.get_app.config.get('FAB_ADD_OPENAPI_VIEWS', True):
self.openapi_manager.register_views()
|
def function[_add_admin_views, parameter[self]]:
constant[
Registers indexview, utilview (back function), babel views and Security views.
]
name[self].indexview assign[=] call[name[self]._check_and_init, parameter[name[self].indexview]]
call[name[self].add_view_no_menu, parameter[name[self].indexview]]
call[name[self].add_view_no_menu, parameter[call[name[UtilView], parameter[]]]]
call[name[self].bm.register_views, parameter[]]
if call[name[self].get_app.config.get, parameter[constant[FAB_ADD_SECURITY_VIEWS], constant[True]]] begin[:]
call[name[self].sm.register_views, parameter[]]
if call[name[self].get_app.config.get, parameter[constant[FAB_ADD_OPENAPI_VIEWS], constant[True]]] begin[:]
call[name[self].openapi_manager.register_views, parameter[]]
|
keyword[def] identifier[_add_admin_views] ( identifier[self] ):
literal[string]
identifier[self] . identifier[indexview] = identifier[self] . identifier[_check_and_init] ( identifier[self] . identifier[indexview] )
identifier[self] . identifier[add_view_no_menu] ( identifier[self] . identifier[indexview] )
identifier[self] . identifier[add_view_no_menu] ( identifier[UtilView] ())
identifier[self] . identifier[bm] . identifier[register_views] ()
keyword[if] identifier[self] . identifier[get_app] . identifier[config] . identifier[get] ( literal[string] , keyword[True] ):
identifier[self] . identifier[sm] . identifier[register_views] ()
keyword[if] identifier[self] . identifier[get_app] . identifier[config] . identifier[get] ( literal[string] , keyword[True] ):
identifier[self] . identifier[openapi_manager] . identifier[register_views] ()
|
def _add_admin_views(self):
"""
Registers indexview, utilview (back function), babel views and Security views.
"""
self.indexview = self._check_and_init(self.indexview)
self.add_view_no_menu(self.indexview)
self.add_view_no_menu(UtilView())
self.bm.register_views()
if self.get_app.config.get('FAB_ADD_SECURITY_VIEWS', True):
self.sm.register_views() # depends on [control=['if'], data=[]]
if self.get_app.config.get('FAB_ADD_OPENAPI_VIEWS', True):
self.openapi_manager.register_views() # depends on [control=['if'], data=[]]
|
def insert_all(db, schema_name, table_name, columns, items):
"""
    Insert all items in the given items list into the specified table, schema_name.table_name.
"""
table = '{0}.{1}'.format(schema_name, table_name) if schema_name else table_name
columns_list = ', '.join(columns)
values_list = ', '.join(['?'] * len(columns))
query = 'INSERT INTO {table} ({columns}) VALUES ({values})'.format(
table=table, columns=columns_list, values=values_list)
for item in items:
values = [getattr(item, col) for col in columns]
db.execute(query, values)
|
def function[insert_all, parameter[db, schema_name, table_name, columns, items]]:
constant[
    Insert all items in the given items list into the specified table, schema_name.table_name.
]
variable[table] assign[=] <ast.IfExp object at 0x7da1b05db640>
variable[columns_list] assign[=] call[constant[, ].join, parameter[name[columns]]]
variable[values_list] assign[=] call[constant[, ].join, parameter[binary_operation[list[[<ast.Constant object at 0x7da1b05d9630>]] * call[name[len], parameter[name[columns]]]]]]
variable[query] assign[=] call[constant[INSERT INTO {table} ({columns}) VALUES ({values})].format, parameter[]]
for taget[name[item]] in starred[name[items]] begin[:]
variable[values] assign[=] <ast.ListComp object at 0x7da1b05dbd30>
call[name[db].execute, parameter[name[query], name[values]]]
|
keyword[def] identifier[insert_all] ( identifier[db] , identifier[schema_name] , identifier[table_name] , identifier[columns] , identifier[items] ):
literal[string]
identifier[table] = literal[string] . identifier[format] ( identifier[schema_name] , identifier[table_name] ) keyword[if] identifier[schema_name] keyword[else] identifier[table_name]
identifier[columns_list] = literal[string] . identifier[join] ( identifier[columns] )
identifier[values_list] = literal[string] . identifier[join] ([ literal[string] ]* identifier[len] ( identifier[columns] ))
identifier[query] = literal[string] . identifier[format] (
identifier[table] = identifier[table] , identifier[columns] = identifier[columns_list] , identifier[values] = identifier[values_list] )
keyword[for] identifier[item] keyword[in] identifier[items] :
identifier[values] =[ identifier[getattr] ( identifier[item] , identifier[col] ) keyword[for] identifier[col] keyword[in] identifier[columns] ]
identifier[db] . identifier[execute] ( identifier[query] , identifier[values] )
|
def insert_all(db, schema_name, table_name, columns, items):
"""
    Insert all items in the given items list into the specified table, schema_name.table_name.
"""
table = '{0}.{1}'.format(schema_name, table_name) if schema_name else table_name
columns_list = ', '.join(columns)
values_list = ', '.join(['?'] * len(columns))
query = 'INSERT INTO {table} ({columns}) VALUES ({values})'.format(table=table, columns=columns_list, values=values_list)
for item in items:
values = [getattr(item, col) for col in columns]
db.execute(query, values) # depends on [control=['for'], data=['item']]
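
A hypothetical end-to-end use of insert_all with an in-memory SQLite database; sqlite3's execute() accepts the '?' placeholders the query above is built with. The table and namedtuple are invented for illustration only.

import sqlite3
from collections import namedtuple

Point = namedtuple('Point', ['x', 'y'])
db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE points (x INTEGER, y INTEGER)')
insert_all(db, None, 'points', ['x', 'y'], [Point(1, 2), Point(3, 4)])
print(db.execute('SELECT x, y FROM points').fetchall())   # [(1, 2), (3, 4)]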
|
def run(
draco_query: List[str],
constants: Dict[str, str] = None,
files: List[str] = None,
relax_hard=False,
silence_warnings=False,
debug=False,
clear_cache=False,
) -> Optional[Result]:
""" Run clingo to compute a completion of a partial spec or violations. """
    # Clear file cache. Useful during development in notebooks.
if clear_cache and file_cache:
logger.warning("Cleared file cache")
file_cache.clear()
stderr, stdout = run_clingo(
draco_query, constants, files, relax_hard, silence_warnings, debug
)
try:
json_result = json.loads(stdout)
except json.JSONDecodeError:
logger.error("stdout: %s", stdout)
logger.error("stderr: %s", stderr)
raise
if stderr:
logger.error(stderr)
result = json_result["Result"]
if result == "UNSATISFIABLE":
logger.info("Constraints are unsatisfiable.")
return None
elif result == "OPTIMUM FOUND":
# get the last witness, which is the best result
answers = json_result["Call"][0]["Witnesses"][-1]
logger.debug(answers["Value"])
return Result(
clyngor.Answers(answers["Value"]).sorted,
cost=json_result["Models"]["Costs"][0],
)
elif result == "SATISFIABLE":
answers = json_result["Call"][0]["Witnesses"][-1]
assert (
json_result["Models"]["Number"] == 1
), "Should not have more than one model if we don't optimize"
logger.debug(answers["Value"])
return Result(clyngor.Answers(answers["Value"]).sorted)
else:
logger.error("Unsupported result: %s", result)
return None
|
def function[run, parameter[draco_query, constants, files, relax_hard, silence_warnings, debug, clear_cache]]:
constant[ Run clingo to compute a completion of a partial spec or violations. ]
if <ast.BoolOp object at 0x7da1b0b373a0> begin[:]
call[name[logger].warning, parameter[constant[Cleared file cache]]]
call[name[file_cache].clear, parameter[]]
<ast.Tuple object at 0x7da1b0b377f0> assign[=] call[name[run_clingo], parameter[name[draco_query], name[constants], name[files], name[relax_hard], name[silence_warnings], name[debug]]]
<ast.Try object at 0x7da1b0b35510>
if name[stderr] begin[:]
call[name[logger].error, parameter[name[stderr]]]
variable[result] assign[=] call[name[json_result]][constant[Result]]
if compare[name[result] equal[==] constant[UNSATISFIABLE]] begin[:]
call[name[logger].info, parameter[constant[Constraints are unsatisfiable.]]]
return[constant[None]]
|
keyword[def] identifier[run] (
identifier[draco_query] : identifier[List] [ identifier[str] ],
identifier[constants] : identifier[Dict] [ identifier[str] , identifier[str] ]= keyword[None] ,
identifier[files] : identifier[List] [ identifier[str] ]= keyword[None] ,
identifier[relax_hard] = keyword[False] ,
identifier[silence_warnings] = keyword[False] ,
identifier[debug] = keyword[False] ,
identifier[clear_cache] = keyword[False] ,
)-> identifier[Optional] [ identifier[Result] ]:
literal[string]
keyword[if] identifier[clear_cache] keyword[and] identifier[file_cache] :
identifier[logger] . identifier[warning] ( literal[string] )
identifier[file_cache] . identifier[clear] ()
identifier[stderr] , identifier[stdout] = identifier[run_clingo] (
identifier[draco_query] , identifier[constants] , identifier[files] , identifier[relax_hard] , identifier[silence_warnings] , identifier[debug]
)
keyword[try] :
identifier[json_result] = identifier[json] . identifier[loads] ( identifier[stdout] )
keyword[except] identifier[json] . identifier[JSONDecodeError] :
identifier[logger] . identifier[error] ( literal[string] , identifier[stdout] )
identifier[logger] . identifier[error] ( literal[string] , identifier[stderr] )
keyword[raise]
keyword[if] identifier[stderr] :
identifier[logger] . identifier[error] ( identifier[stderr] )
identifier[result] = identifier[json_result] [ literal[string] ]
keyword[if] identifier[result] == literal[string] :
identifier[logger] . identifier[info] ( literal[string] )
keyword[return] keyword[None]
keyword[elif] identifier[result] == literal[string] :
identifier[answers] = identifier[json_result] [ literal[string] ][ literal[int] ][ literal[string] ][- literal[int] ]
identifier[logger] . identifier[debug] ( identifier[answers] [ literal[string] ])
keyword[return] identifier[Result] (
identifier[clyngor] . identifier[Answers] ( identifier[answers] [ literal[string] ]). identifier[sorted] ,
identifier[cost] = identifier[json_result] [ literal[string] ][ literal[string] ][ literal[int] ],
)
keyword[elif] identifier[result] == literal[string] :
identifier[answers] = identifier[json_result] [ literal[string] ][ literal[int] ][ literal[string] ][- literal[int] ]
keyword[assert] (
identifier[json_result] [ literal[string] ][ literal[string] ]== literal[int]
), literal[string]
identifier[logger] . identifier[debug] ( identifier[answers] [ literal[string] ])
keyword[return] identifier[Result] ( identifier[clyngor] . identifier[Answers] ( identifier[answers] [ literal[string] ]). identifier[sorted] )
keyword[else] :
identifier[logger] . identifier[error] ( literal[string] , identifier[result] )
keyword[return] keyword[None]
|
def run(draco_query: List[str], constants: Dict[str, str]=None, files: List[str]=None, relax_hard=False, silence_warnings=False, debug=False, clear_cache=False) -> Optional[Result]:
""" Run clingo to compute a completion of a partial spec or violations. """
    # Clear file cache. Useful during development in notebooks.
if clear_cache and file_cache:
logger.warning('Cleared file cache')
file_cache.clear() # depends on [control=['if'], data=[]]
(stderr, stdout) = run_clingo(draco_query, constants, files, relax_hard, silence_warnings, debug)
try:
json_result = json.loads(stdout) # depends on [control=['try'], data=[]]
except json.JSONDecodeError:
logger.error('stdout: %s', stdout)
logger.error('stderr: %s', stderr)
raise # depends on [control=['except'], data=[]]
if stderr:
logger.error(stderr) # depends on [control=['if'], data=[]]
result = json_result['Result']
if result == 'UNSATISFIABLE':
logger.info('Constraints are unsatisfiable.')
return None # depends on [control=['if'], data=[]]
elif result == 'OPTIMUM FOUND':
# get the last witness, which is the best result
answers = json_result['Call'][0]['Witnesses'][-1]
logger.debug(answers['Value'])
return Result(clyngor.Answers(answers['Value']).sorted, cost=json_result['Models']['Costs'][0]) # depends on [control=['if'], data=[]]
elif result == 'SATISFIABLE':
answers = json_result['Call'][0]['Witnesses'][-1]
assert json_result['Models']['Number'] == 1, "Should not have more than one model if we don't optimize"
logger.debug(answers['Value'])
return Result(clyngor.Answers(answers['Value']).sorted) # depends on [control=['if'], data=[]]
else:
logger.error('Unsupported result: %s', result)
return None
|
def assertFileSizeGreater(self, filename, size, msg=None):
'''Fail if ``filename``'s size is not greater than ``size`` as
determined by the '>' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertGreater(fsize, size, msg=msg)
|
def function[assertFileSizeGreater, parameter[self, filename, size, msg]]:
constant[Fail if ``filename``'s size is not greater than ``size`` as
determined by the '>' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
]
variable[fsize] assign[=] call[name[self]._get_file_size, parameter[name[filename]]]
call[name[self].assertGreater, parameter[name[fsize], name[size]]]
|
keyword[def] identifier[assertFileSizeGreater] ( identifier[self] , identifier[filename] , identifier[size] , identifier[msg] = keyword[None] ):
literal[string]
identifier[fsize] = identifier[self] . identifier[_get_file_size] ( identifier[filename] )
identifier[self] . identifier[assertGreater] ( identifier[fsize] , identifier[size] , identifier[msg] = identifier[msg] )
|
def assertFileSizeGreater(self, filename, size, msg=None):
"""Fail if ``filename``'s size is not greater than ``size`` as
determined by the '>' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
"""
fsize = self._get_file_size(filename)
self.assertGreater(fsize, size, msg=msg)
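
A sketch of how assertFileSizeGreater is typically consumed from a unittest.TestCase. The mixin class that provides it is not shown in this snippet, so a minimal stand-in is defined here purely for illustration, assuming the file size comes from os.path.getsize.

import os
import unittest

class FileSizeMixin(object):
    # Stand-in for the real mixin that would provide the assertion above.
    def _get_file_size(self, filename):
        return os.path.getsize(filename)

    def assertFileSizeGreater(self, filename, size, msg=None):
        self.assertGreater(self._get_file_size(filename), size, msg=msg)

class ExampleTest(FileSizeMixin, unittest.TestCase):
    def test_output_file_not_empty(self):
        with open('example_output.txt', 'w') as f:
            f.write('some data')
        self.assertFileSizeGreater('example_output.txt', 0)

if __name__ == '__main__':
    unittest.main()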
|
def std(expr, **kw):
"""
Standard deviation.
:param expr:
:param kw:
:return:
"""
ddof = kw.get('ddof', kw.get('_ddof', 1))
output_type = _stats_type(expr)
return _reduction(expr, Std, output_type, _ddof=ddof)
|
def function[std, parameter[expr]]:
constant[
Standard deviation.
:param expr:
:param kw:
:return:
]
variable[ddof] assign[=] call[name[kw].get, parameter[constant[ddof], call[name[kw].get, parameter[constant[_ddof], constant[1]]]]]
variable[output_type] assign[=] call[name[_stats_type], parameter[name[expr]]]
return[call[name[_reduction], parameter[name[expr], name[Std], name[output_type]]]]
|
keyword[def] identifier[std] ( identifier[expr] ,** identifier[kw] ):
literal[string]
identifier[ddof] = identifier[kw] . identifier[get] ( literal[string] , identifier[kw] . identifier[get] ( literal[string] , literal[int] ))
identifier[output_type] = identifier[_stats_type] ( identifier[expr] )
keyword[return] identifier[_reduction] ( identifier[expr] , identifier[Std] , identifier[output_type] , identifier[_ddof] = identifier[ddof] )
|
def std(expr, **kw):
"""
Standard deviation.
:param expr:
:param kw:
:return:
"""
ddof = kw.get('ddof', kw.get('_ddof', 1))
output_type = _stats_type(expr)
return _reduction(expr, Std, output_type, _ddof=ddof)
|
def send_exit_with_code(cls, sock, code):
"""Send an Exit chunk over the specified socket, containing the specified return code."""
encoded_exit_status = cls.encode_int(code)
cls.send_exit(sock, payload=encoded_exit_status)
|
def function[send_exit_with_code, parameter[cls, sock, code]]:
constant[Send an Exit chunk over the specified socket, containing the specified return code.]
variable[encoded_exit_status] assign[=] call[name[cls].encode_int, parameter[name[code]]]
call[name[cls].send_exit, parameter[name[sock]]]
|
keyword[def] identifier[send_exit_with_code] ( identifier[cls] , identifier[sock] , identifier[code] ):
literal[string]
identifier[encoded_exit_status] = identifier[cls] . identifier[encode_int] ( identifier[code] )
identifier[cls] . identifier[send_exit] ( identifier[sock] , identifier[payload] = identifier[encoded_exit_status] )
|
def send_exit_with_code(cls, sock, code):
"""Send an Exit chunk over the specified socket, containing the specified return code."""
encoded_exit_status = cls.encode_int(code)
cls.send_exit(sock, payload=encoded_exit_status)
|
def addNode(self, node):
'''
Update the shared map with my in-construction node
'''
self.mybldgbuids[node.buid] = node
self.allbldgbuids[node.buid] = (node, self.doneevent)
|
def function[addNode, parameter[self, node]]:
constant[
Update the shared map with my in-construction node
]
call[name[self].mybldgbuids][name[node].buid] assign[=] name[node]
call[name[self].allbldgbuids][name[node].buid] assign[=] tuple[[<ast.Name object at 0x7da207f03160>, <ast.Attribute object at 0x7da207f01000>]]
|
keyword[def] identifier[addNode] ( identifier[self] , identifier[node] ):
literal[string]
identifier[self] . identifier[mybldgbuids] [ identifier[node] . identifier[buid] ]= identifier[node]
identifier[self] . identifier[allbldgbuids] [ identifier[node] . identifier[buid] ]=( identifier[node] , identifier[self] . identifier[doneevent] )
|
def addNode(self, node):
"""
Update the shared map with my in-construction node
"""
self.mybldgbuids[node.buid] = node
self.allbldgbuids[node.buid] = (node, self.doneevent)
|
def is_b_connected(H, source_node, target_node):
"""Checks if a target node is B-connected to a source node.
A node t is B-connected to a node s iff:
- t is s, or
- there exists an edge in the backward star of t such that all nodes in
the tail of that edge are B-connected to s
In other words, this method determines if a target node can be B-visited
from the source node in the sense of the 'B-Visit' algorithm. Refer to
'b_visit's documentation for more details.
:param H: the hypergraph to check B-connectedness on.
:param source_node: the node to check B-connectedness to.
:param target_node: the node to check B-connectedness of.
:returns: bool -- whether target_node can be visited from source_node.
"""
b_visited_nodes, Pv, Pe, v = b_visit(H, source_node)
return target_node in b_visited_nodes
|
def function[is_b_connected, parameter[H, source_node, target_node]]:
constant[Checks if a target node is B-connected to a source node.
A node t is B-connected to a node s iff:
- t is s, or
- there exists an edge in the backward star of t such that all nodes in
the tail of that edge are B-connected to s
In other words, this method determines if a target node can be B-visited
from the source node in the sense of the 'B-Visit' algorithm. Refer to
'b_visit's documentation for more details.
:param H: the hypergraph to check B-connectedness on.
:param source_node: the node to check B-connectedness to.
:param target_node: the node to check B-connectedness of.
:returns: bool -- whether target_node can be visited from source_node.
]
<ast.Tuple object at 0x7da1b11d6560> assign[=] call[name[b_visit], parameter[name[H], name[source_node]]]
return[compare[name[target_node] in name[b_visited_nodes]]]
|
keyword[def] identifier[is_b_connected] ( identifier[H] , identifier[source_node] , identifier[target_node] ):
literal[string]
identifier[b_visited_nodes] , identifier[Pv] , identifier[Pe] , identifier[v] = identifier[b_visit] ( identifier[H] , identifier[source_node] )
keyword[return] identifier[target_node] keyword[in] identifier[b_visited_nodes]
|
def is_b_connected(H, source_node, target_node):
"""Checks if a target node is B-connected to a source node.
A node t is B-connected to a node s iff:
- t is s, or
- there exists an edge in the backward star of t such that all nodes in
the tail of that edge are B-connected to s
In other words, this method determines if a target node can be B-visited
from the source node in the sense of the 'B-Visit' algorithm. Refer to
'b_visit's documentation for more details.
:param H: the hypergraph to check B-connectedness on.
:param source_node: the node to check B-connectedness to.
:param target_node: the node to check B-connectedness of.
:returns: bool -- whether target_node can be visited from source_node.
"""
(b_visited_nodes, Pv, Pe, v) = b_visit(H, source_node)
return target_node in b_visited_nodes
|
def configure():
"""Load logging configuration from our own defaults."""
log_levels = {
5: logging.NOTSET,
4: logging.DEBUG,
3: logging.INFO,
2: logging.WARNING,
1: logging.ERROR,
0: logging.CRITICAL
}
logging.captureWarnings(True)
root_logger = logging.getLogger()
if settings.CFG["debug"]:
details_format = logging.Formatter(
'%(name)s (%(filename)s:%(lineno)s) [%(levelname)s] %(message)s')
details_hdl = logging.StreamHandler()
details_hdl.setFormatter(details_format)
root_logger.addHandler(details_hdl)
else:
brief_format = logging.Formatter('%(message)s')
console_hdl = logging.StreamHandler()
console_hdl.setFormatter(brief_format)
root_logger.addHandler(console_hdl)
root_logger.setLevel(log_levels[int(settings.CFG["verbosity"])])
configure_plumbum_log()
configure_migrate_log()
configure_parse_log()
|
def function[configure, parameter[]]:
constant[Load logging configuration from our own defaults.]
variable[log_levels] assign[=] dictionary[[<ast.Constant object at 0x7da20c7cae60>, <ast.Constant object at 0x7da20c7c8c40>, <ast.Constant object at 0x7da20c7cace0>, <ast.Constant object at 0x7da20c7c8400>, <ast.Constant object at 0x7da20c7cb580>, <ast.Constant object at 0x7da20c7c9420>], [<ast.Attribute object at 0x7da20c7c9d20>, <ast.Attribute object at 0x7da20c7c92d0>, <ast.Attribute object at 0x7da20c7cb190>, <ast.Attribute object at 0x7da20c7cad10>, <ast.Attribute object at 0x7da20c7ca4d0>, <ast.Attribute object at 0x7da20c7c8f10>]]
call[name[logging].captureWarnings, parameter[constant[True]]]
variable[root_logger] assign[=] call[name[logging].getLogger, parameter[]]
if call[name[settings].CFG][constant[debug]] begin[:]
variable[details_format] assign[=] call[name[logging].Formatter, parameter[constant[%(name)s (%(filename)s:%(lineno)s) [%(levelname)s] %(message)s]]]
variable[details_hdl] assign[=] call[name[logging].StreamHandler, parameter[]]
call[name[details_hdl].setFormatter, parameter[name[details_format]]]
call[name[root_logger].addHandler, parameter[name[details_hdl]]]
call[name[root_logger].setLevel, parameter[call[name[log_levels]][call[name[int], parameter[call[name[settings].CFG][constant[verbosity]]]]]]]
call[name[configure_plumbum_log], parameter[]]
call[name[configure_migrate_log], parameter[]]
call[name[configure_parse_log], parameter[]]
|
keyword[def] identifier[configure] ():
literal[string]
identifier[log_levels] ={
literal[int] : identifier[logging] . identifier[NOTSET] ,
literal[int] : identifier[logging] . identifier[DEBUG] ,
literal[int] : identifier[logging] . identifier[INFO] ,
literal[int] : identifier[logging] . identifier[WARNING] ,
literal[int] : identifier[logging] . identifier[ERROR] ,
literal[int] : identifier[logging] . identifier[CRITICAL]
}
identifier[logging] . identifier[captureWarnings] ( keyword[True] )
identifier[root_logger] = identifier[logging] . identifier[getLogger] ()
keyword[if] identifier[settings] . identifier[CFG] [ literal[string] ]:
identifier[details_format] = identifier[logging] . identifier[Formatter] (
literal[string] )
identifier[details_hdl] = identifier[logging] . identifier[StreamHandler] ()
identifier[details_hdl] . identifier[setFormatter] ( identifier[details_format] )
identifier[root_logger] . identifier[addHandler] ( identifier[details_hdl] )
keyword[else] :
identifier[brief_format] = identifier[logging] . identifier[Formatter] ( literal[string] )
identifier[console_hdl] = identifier[logging] . identifier[StreamHandler] ()
identifier[console_hdl] . identifier[setFormatter] ( identifier[brief_format] )
identifier[root_logger] . identifier[addHandler] ( identifier[console_hdl] )
identifier[root_logger] . identifier[setLevel] ( identifier[log_levels] [ identifier[int] ( identifier[settings] . identifier[CFG] [ literal[string] ])])
identifier[configure_plumbum_log] ()
identifier[configure_migrate_log] ()
identifier[configure_parse_log] ()
|
def configure():
"""Load logging configuration from our own defaults."""
log_levels = {5: logging.NOTSET, 4: logging.DEBUG, 3: logging.INFO, 2: logging.WARNING, 1: logging.ERROR, 0: logging.CRITICAL}
logging.captureWarnings(True)
root_logger = logging.getLogger()
if settings.CFG['debug']:
details_format = logging.Formatter('%(name)s (%(filename)s:%(lineno)s) [%(levelname)s] %(message)s')
details_hdl = logging.StreamHandler()
details_hdl.setFormatter(details_format)
root_logger.addHandler(details_hdl) # depends on [control=['if'], data=[]]
else:
brief_format = logging.Formatter('%(message)s')
console_hdl = logging.StreamHandler()
console_hdl.setFormatter(brief_format)
root_logger.addHandler(console_hdl)
root_logger.setLevel(log_levels[int(settings.CFG['verbosity'])])
configure_plumbum_log()
configure_migrate_log()
configure_parse_log()
|
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, Mapping))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
|
def function[prepare_body, parameter[self, data, files, json]]:
constant[Prepares the given HTTP body data.]
variable[body] assign[=] constant[None]
variable[content_type] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b1de1f60> begin[:]
variable[content_type] assign[=] constant[application/json]
variable[body] assign[=] call[name[complexjson].dumps, parameter[name[json]]]
if <ast.UnaryOp object at 0x7da1b1f82680> begin[:]
variable[body] assign[=] call[name[body].encode, parameter[constant[utf-8]]]
variable[is_stream] assign[=] call[name[all], parameter[list[[<ast.Call object at 0x7da1b1f81c60>, <ast.UnaryOp object at 0x7da1b1de30a0>]]]]
<ast.Try object at 0x7da1b1de3850>
if name[is_stream] begin[:]
variable[body] assign[=] name[data]
if compare[call[name[getattr], parameter[name[body], constant[tell], constant[None]]] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b1de1930>
if name[files] begin[:]
<ast.Raise object at 0x7da1b1de1ea0>
if name[length] begin[:]
call[name[self].headers][constant[Content-Length]] assign[=] call[name[builtin_str], parameter[name[length]]]
name[self].body assign[=] name[body]
|
keyword[def] identifier[prepare_body] ( identifier[self] , identifier[data] , identifier[files] , identifier[json] = keyword[None] ):
literal[string]
identifier[body] = keyword[None]
identifier[content_type] = keyword[None]
keyword[if] keyword[not] identifier[data] keyword[and] identifier[json] keyword[is] keyword[not] keyword[None] :
identifier[content_type] = literal[string]
identifier[body] = identifier[complexjson] . identifier[dumps] ( identifier[json] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[body] , identifier[bytes] ):
identifier[body] = identifier[body] . identifier[encode] ( literal[string] )
identifier[is_stream] = identifier[all] ([
identifier[hasattr] ( identifier[data] , literal[string] ),
keyword[not] identifier[isinstance] ( identifier[data] ,( identifier[basestring] , identifier[list] , identifier[tuple] , identifier[Mapping] ))
])
keyword[try] :
identifier[length] = identifier[super_len] ( identifier[data] )
keyword[except] ( identifier[TypeError] , identifier[AttributeError] , identifier[UnsupportedOperation] ):
identifier[length] = keyword[None]
keyword[if] identifier[is_stream] :
identifier[body] = identifier[data]
keyword[if] identifier[getattr] ( identifier[body] , literal[string] , keyword[None] ) keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[self] . identifier[_body_position] = identifier[body] . identifier[tell] ()
keyword[except] ( identifier[IOError] , identifier[OSError] ):
identifier[self] . identifier[_body_position] = identifier[object] ()
keyword[if] identifier[files] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
keyword[if] identifier[length] :
identifier[self] . identifier[headers] [ literal[string] ]= identifier[builtin_str] ( identifier[length] )
keyword[else] :
identifier[self] . identifier[headers] [ literal[string] ]= literal[string]
keyword[else] :
keyword[if] identifier[files] :
( identifier[body] , identifier[content_type] )= identifier[self] . identifier[_encode_files] ( identifier[files] , identifier[data] )
keyword[else] :
keyword[if] identifier[data] :
identifier[body] = identifier[self] . identifier[_encode_params] ( identifier[data] )
keyword[if] identifier[isinstance] ( identifier[data] , identifier[basestring] ) keyword[or] identifier[hasattr] ( identifier[data] , literal[string] ):
identifier[content_type] = keyword[None]
keyword[else] :
identifier[content_type] = literal[string]
identifier[self] . identifier[prepare_content_length] ( identifier[body] )
keyword[if] identifier[content_type] keyword[and] ( literal[string] keyword[not] keyword[in] identifier[self] . identifier[headers] ):
identifier[self] . identifier[headers] [ literal[string] ]= identifier[content_type]
identifier[self] . identifier[body] = identifier[body]
|
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
is_stream = all([hasattr(data, '__iter__'), not isinstance(data, (basestring, list, tuple, Mapping))])
try:
length = super_len(data) # depends on [control=['try'], data=[]]
except (TypeError, AttributeError, UnsupportedOperation):
length = None # depends on [control=['except'], data=[]]
if is_stream:
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell() # depends on [control=['try'], data=[]]
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.') # depends on [control=['if'], data=[]]
if length:
self.headers['Content-Length'] = builtin_str(length) # depends on [control=['if'], data=[]]
else:
self.headers['Transfer-Encoding'] = 'chunked' # depends on [control=['if'], data=[]]
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data) # depends on [control=['if'], data=[]]
elif data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None # depends on [control=['if'], data=[]]
else:
content_type = 'application/x-www-form-urlencoded' # depends on [control=['if'], data=[]]
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and 'content-type' not in self.headers:
self.headers['Content-Type'] = content_type # depends on [control=['if'], data=[]]
self.body = body
|
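The prepare_body logic above is the body-preparation step of requests' PreparedRequest. A minimal sketch of how its three branches (json=, form data=, streamed data) surface through the public requests API; the httpbin.org URL is only a placeholder and nothing is actually sent:

import requests

# json= branch: body is JSON-encoded to bytes, Content-Type becomes application/json
p = requests.Request('POST', 'https://httpbin.org/post', json={'a': 1}).prepare()
print(p.headers['Content-Type'], p.body)    # application/json b'{"a": 1}'

# data= branch with a mapping: form-encoded body
p = requests.Request('POST', 'https://httpbin.org/post', data={'a': 1}).prepare()
print(p.headers['Content-Type'], p.body)    # application/x-www-form-urlencoded a=1

# streamed branch: an iterator body with unknown length is marked chunked
p = requests.Request('POST', 'https://httpbin.org/post', data=iter([b'x', b'y'])).prepare()
print(p.headers.get('Transfer-Encoding'))   # chunked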
def update_collection_by_id(cls, collection_id, collection, **kwargs):
"""Update Collection
Update attributes of Collection
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_collection_by_id(collection_id, collection, async=True)
>>> result = thread.get()
:param async bool
:param str collection_id: ID of collection to update. (required)
:param Collection collection: Attributes of collection to update. (required)
:return: Collection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_collection_by_id_with_http_info(collection_id, collection, **kwargs)
else:
(data) = cls._update_collection_by_id_with_http_info(collection_id, collection, **kwargs)
return data
|
def function[update_collection_by_id, parameter[cls, collection_id, collection]]:
constant[Update Collection
Update attributes of Collection
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_collection_by_id(collection_id, collection, async=True)
>>> result = thread.get()
:param async bool
:param str collection_id: ID of collection to update. (required)
:param Collection collection: Attributes of collection to update. (required)
:return: Collection
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._update_collection_by_id_with_http_info, parameter[name[collection_id], name[collection]]]]
|
keyword[def] identifier[update_collection_by_id] ( identifier[cls] , identifier[collection_id] , identifier[collection] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_update_collection_by_id_with_http_info] ( identifier[collection_id] , identifier[collection] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_update_collection_by_id_with_http_info] ( identifier[collection_id] , identifier[collection] ,** identifier[kwargs] )
keyword[return] identifier[data]
|
def update_collection_by_id(cls, collection_id, collection, **kwargs):
"""Update Collection
Update attributes of Collection
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_collection_by_id(collection_id, collection, async=True)
>>> result = thread.get()
:param async bool
:param str collection_id: ID of collection to update. (required)
:param Collection collection: Attributes of collection to update. (required)
:return: Collection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_collection_by_id_with_http_info(collection_id, collection, **kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._update_collection_by_id_with_http_info(collection_id, collection, **kwargs)
return data
|
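A hedged usage sketch for the generated client method above. The Collection class, its constructor arguments, and the collection id are assumptions taken from the docstring, not shown elsewhere in this document; note that because async became a reserved word in Python 3.7, the flag can only be passed by unpacking a dict rather than writing async=True literally.

# Hypothetical SDK objects; only the call pattern follows the method above.
changes = Collection(name='renamed-collection')

# Synchronous call: returns the updated Collection directly.
updated = Collection.update_collection_by_id('col-123', changes)

# Asynchronous call: returns a thread-like handle whose .get() blocks for the result.
thread = Collection.update_collection_by_id('col-123', changes, **{'async': True})
updated = thread.get()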
def set(self, ctype, key, data):
"""Set or update cache content.
:param ctype: cache type
:param key: the key to be set value
:param data: cache data
"""
with zvmutils.acquire_lock(self._lock):
target_cache = self._get_ctype_cache(ctype)
target_cache['data'][key] = data
|
def function[set, parameter[self, ctype, key, data]]:
constant[Set or update cache content.
:param ctype: cache type
:param key: the key to be set value
:param data: cache data
]
with call[name[zvmutils].acquire_lock, parameter[name[self]._lock]] begin[:]
variable[target_cache] assign[=] call[name[self]._get_ctype_cache, parameter[name[ctype]]]
call[call[name[target_cache]][constant[data]]][name[key]] assign[=] name[data]
|
keyword[def] identifier[set] ( identifier[self] , identifier[ctype] , identifier[key] , identifier[data] ):
literal[string]
keyword[with] identifier[zvmutils] . identifier[acquire_lock] ( identifier[self] . identifier[_lock] ):
identifier[target_cache] = identifier[self] . identifier[_get_ctype_cache] ( identifier[ctype] )
identifier[target_cache] [ literal[string] ][ identifier[key] ]= identifier[data]
|
def set(self, ctype, key, data):
"""Set or update cache content.
:param ctype: cache type
:param key: the key to be set value
:param data: cache data
"""
with zvmutils.acquire_lock(self._lock):
target_cache = self._get_ctype_cache(ctype)
target_cache['data'][key] = data # depends on [control=['with'], data=[]]
|
def save(self, *args, **kwargs):
"""
Save the created_by and last_modified_by fields based on the current admin user.
"""
if not self.instance.id:
self.instance.created_by = self.user
self.instance.last_modified_by = self.user
return super(ChangeableContentForm, self).save(*args, **kwargs)
|
def function[save, parameter[self]]:
constant[
Save the created_by and last_modified_by fields based on the current admin user.
]
if <ast.UnaryOp object at 0x7da1b15b0250> begin[:]
name[self].instance.created_by assign[=] name[self].user
name[self].instance.last_modified_by assign[=] name[self].user
return[call[call[name[super], parameter[name[ChangeableContentForm], name[self]]].save, parameter[<ast.Starred object at 0x7da1b13bbd90>]]]
|
keyword[def] identifier[save] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[instance] . identifier[id] :
identifier[self] . identifier[instance] . identifier[created_by] = identifier[self] . identifier[user]
identifier[self] . identifier[instance] . identifier[last_modified_by] = identifier[self] . identifier[user]
keyword[return] identifier[super] ( identifier[ChangeableContentForm] , identifier[self] ). identifier[save] (* identifier[args] ,** identifier[kwargs] )
|
def save(self, *args, **kwargs):
"""
Save the created_by and last_modified_by fields based on the current admin user.
"""
if not self.instance.id:
self.instance.created_by = self.user # depends on [control=['if'], data=[]]
self.instance.last_modified_by = self.user
return super(ChangeableContentForm, self).save(*args, **kwargs)
|
def addVariant(self,variant):
'''Appends one Variant to variants
'''
if isinstance(variant, Variant):
self.variants.append(variant)
else:
        raise VariantError(
            'variant Type should be Variant, not %s' % type(variant))
|
def function[addVariant, parameter[self, variant]]:
constant[Appends one Variant to variants
]
if call[name[isinstance], parameter[name[variant], name[Variant]]] begin[:]
call[name[self].variants.append, parameter[name[variant]]]
|
keyword[def] identifier[addVariant] ( identifier[self] , identifier[variant] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[variant] , identifier[Variant] ):
identifier[self] . identifier[variants] . identifier[append] ( identifier[variant] )
keyword[else] :
        keyword[raise] identifier[VariantError] ( literal[string] % identifier[type] ( identifier[variant] ))
|
def addVariant(self, variant):
"""Appends one Variant to variants
"""
if isinstance(variant, Variant):
self.variants.append(variant) # depends on [control=['if'], data=[]]
else:
        raise VariantError('variant Type should be Variant, not %s' % type(variant))
|
def match_fn(match_values_and_abi, data):
"""Match function used for filtering non-indexed event arguments.
Values provided through the match_values_and_abi parameter are
compared to the abi decoded log data.
"""
abi_types, all_match_values = zip(*match_values_and_abi)
decoded_values = decode_abi(abi_types, HexBytes(data))
for data_value, match_values, abi_type in zip(decoded_values, all_match_values, abi_types):
if match_values is None:
continue
normalized_data = normalize_data_values(abi_type, data_value)
for value in match_values:
if not is_encodable(abi_type, value):
raise ValueError(
"Value {0} is of the wrong abi type. "
"Expected {1} typed value.".format(value, abi_type))
if value == normalized_data:
break
else:
return False
return True
|
def function[match_fn, parameter[match_values_and_abi, data]]:
constant[Match function used for filtering non-indexed event arguments.
Values provided through the match_values_and_abi parameter are
compared to the abi decoded log data.
]
<ast.Tuple object at 0x7da18f813b80> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da18f8111e0>]]
variable[decoded_values] assign[=] call[name[decode_abi], parameter[name[abi_types], call[name[HexBytes], parameter[name[data]]]]]
for taget[tuple[[<ast.Name object at 0x7da18f811870>, <ast.Name object at 0x7da18f810430>, <ast.Name object at 0x7da18f8131c0>]]] in starred[call[name[zip], parameter[name[decoded_values], name[all_match_values], name[abi_types]]]] begin[:]
if compare[name[match_values] is constant[None]] begin[:]
continue
variable[normalized_data] assign[=] call[name[normalize_data_values], parameter[name[abi_type], name[data_value]]]
for taget[name[value]] in starred[name[match_values]] begin[:]
if <ast.UnaryOp object at 0x7da18f811cc0> begin[:]
<ast.Raise object at 0x7da18f810820>
if compare[name[value] equal[==] name[normalized_data]] begin[:]
break
return[constant[True]]
|
keyword[def] identifier[match_fn] ( identifier[match_values_and_abi] , identifier[data] ):
literal[string]
identifier[abi_types] , identifier[all_match_values] = identifier[zip] (* identifier[match_values_and_abi] )
identifier[decoded_values] = identifier[decode_abi] ( identifier[abi_types] , identifier[HexBytes] ( identifier[data] ))
keyword[for] identifier[data_value] , identifier[match_values] , identifier[abi_type] keyword[in] identifier[zip] ( identifier[decoded_values] , identifier[all_match_values] , identifier[abi_types] ):
keyword[if] identifier[match_values] keyword[is] keyword[None] :
keyword[continue]
identifier[normalized_data] = identifier[normalize_data_values] ( identifier[abi_type] , identifier[data_value] )
keyword[for] identifier[value] keyword[in] identifier[match_values] :
keyword[if] keyword[not] identifier[is_encodable] ( identifier[abi_type] , identifier[value] ):
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] ( identifier[value] , identifier[abi_type] ))
keyword[if] identifier[value] == identifier[normalized_data] :
keyword[break]
keyword[else] :
keyword[return] keyword[False]
keyword[return] keyword[True]
|
def match_fn(match_values_and_abi, data):
"""Match function used for filtering non-indexed event arguments.
Values provided through the match_values_and_abi parameter are
compared to the abi decoded log data.
"""
(abi_types, all_match_values) = zip(*match_values_and_abi)
decoded_values = decode_abi(abi_types, HexBytes(data))
for (data_value, match_values, abi_type) in zip(decoded_values, all_match_values, abi_types):
if match_values is None:
continue # depends on [control=['if'], data=[]]
normalized_data = normalize_data_values(abi_type, data_value)
for value in match_values:
if not is_encodable(abi_type, value):
raise ValueError('Value {0} is of the wrong abi type. Expected {1} typed value.'.format(value, abi_type)) # depends on [control=['if'], data=[]]
if value == normalized_data:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['value']]
else:
return False # depends on [control=['for'], data=[]]
return True
|
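The filter semantics above are: for every non-indexed event argument, either no constraint is given (None) or the decoded value must equal one of the candidate values, and all constrained arguments must match. A small self-contained sketch of just that matching rule; the real function additionally ABI-decodes the log data and normalizes values with web3 helpers, which is omitted here.

def match_decoded(all_match_values, decoded_values):
    # OR across the candidate values of one argument, AND across arguments.
    for candidates, value in zip(all_match_values, decoded_values):
        if candidates is None:       # this argument is unconstrained
            continue
        if value not in candidates:  # none of the allowed values matched
            return False
    return True

# First argument must be 1 or 2, second argument is unconstrained.
print(match_decoded([(1, 2), None], (2, 'anything')))   # True
print(match_decoded([(1, 2), None], (3, 'anything')))   # False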
def take_break(minutes: hug.types.number=5):
"""Enables temporarily breaking concentration"""
print("")
print("######################################### ARE YOU SURE? #####################################")
try:
for remaining in range(60, -1, -1):
sys.stdout.write("\r")
sys.stdout.write("{:2d} seconds to change your mind. Won't you prefer programming? Or a book?".format(remaining))
sys.stdout.flush()
time.sleep(1)
except KeyboardInterrupt:
print("")
print("")
print(":D :D :D\nGood on you! <3")
return
# The user insisted on breaking concentration.
lose()
print("")
print("######################################### TAKING A BREAK ####################################")
try:
for remaining in range(minutes * 60, -1, -1):
sys.stdout.write("\r")
sys.stdout.write("{:2d} seconds remaining without concentration.".format(remaining))
sys.stdout.flush()
time.sleep(1)
except KeyboardInterrupt:
pass
finally:
sys.stdout.write("\rEnough distraction! \n")
print("######################################### BREAK OVER :) #####################################")
print("")
improve()
|
def function[take_break, parameter[minutes]]:
constant[Enables temporarily breaking concentration]
call[name[print], parameter[constant[]]]
call[name[print], parameter[constant[######################################### ARE YOU SURE? #####################################]]]
<ast.Try object at 0x7da204345b70>
call[name[lose], parameter[]]
call[name[print], parameter[constant[]]]
call[name[print], parameter[constant[######################################### TAKING A BREAK ####################################]]]
<ast.Try object at 0x7da204347b20>
|
keyword[def] identifier[take_break] ( identifier[minutes] : identifier[hug] . identifier[types] . identifier[number] = literal[int] ):
literal[string]
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[try] :
keyword[for] identifier[remaining] keyword[in] identifier[range] ( literal[int] ,- literal[int] ,- literal[int] ):
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] )
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] . identifier[format] ( identifier[remaining] ))
identifier[sys] . identifier[stdout] . identifier[flush] ()
identifier[time] . identifier[sleep] ( literal[int] )
keyword[except] identifier[KeyboardInterrupt] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[return]
identifier[lose] ()
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[try] :
keyword[for] identifier[remaining] keyword[in] identifier[range] ( identifier[minutes] * literal[int] ,- literal[int] ,- literal[int] ):
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] )
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] . identifier[format] ( identifier[remaining] ))
identifier[sys] . identifier[stdout] . identifier[flush] ()
identifier[time] . identifier[sleep] ( literal[int] )
keyword[except] identifier[KeyboardInterrupt] :
keyword[pass]
keyword[finally] :
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[improve] ()
|
def take_break(minutes: hug.types.number=5):
"""Enables temporarily breaking concentration"""
print('')
print('######################################### ARE YOU SURE? #####################################')
try:
for remaining in range(60, -1, -1):
sys.stdout.write('\r')
sys.stdout.write("{:2d} seconds to change your mind. Won't you prefer programming? Or a book?".format(remaining))
sys.stdout.flush()
time.sleep(1) # depends on [control=['for'], data=['remaining']] # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
print('')
print('')
print(':D :D :D\nGood on you! <3')
return # depends on [control=['except'], data=[]]
# The user insisted on breaking concentration.
lose()
print('')
print('######################################### TAKING A BREAK ####################################')
try:
for remaining in range(minutes * 60, -1, -1):
sys.stdout.write('\r')
sys.stdout.write('{:2d} seconds remaining without concentration.'.format(remaining))
sys.stdout.flush()
time.sleep(1) # depends on [control=['for'], data=['remaining']] # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
pass # depends on [control=['except'], data=[]]
finally:
sys.stdout.write('\rEnough distraction! \n')
print('######################################### BREAK OVER :) #####################################')
print('')
improve()
|
def pulse_train(time, start, duration, repeat_time, end):
""" Implements vensim's PULSE TRAIN function
In range [-inf, start) returns 0
In range [start + n * repeat_time, start + n * repeat_time + duration) return 1
In range [start + n * repeat_time + duration, start + (n+1) * repeat_time) return 0
"""
t = time()
if start <= t < end:
return 1 if (t - start) % repeat_time < duration else 0
else:
return 0
|
def function[pulse_train, parameter[time, start, duration, repeat_time, end]]:
constant[ Implements vensim's PULSE TRAIN function
In range [-inf, start) returns 0
In range [start + n * repeat_time, start + n * repeat_time + duration) return 1
In range [start + n * repeat_time + duration, start + (n+1) * repeat_time) return 0
]
variable[t] assign[=] call[name[time], parameter[]]
if compare[name[start] less_or_equal[<=] name[t]] begin[:]
return[<ast.IfExp object at 0x7da2045657b0>]
|
keyword[def] identifier[pulse_train] ( identifier[time] , identifier[start] , identifier[duration] , identifier[repeat_time] , identifier[end] ):
literal[string]
identifier[t] = identifier[time] ()
keyword[if] identifier[start] <= identifier[t] < identifier[end] :
keyword[return] literal[int] keyword[if] ( identifier[t] - identifier[start] )% identifier[repeat_time] < identifier[duration] keyword[else] literal[int]
keyword[else] :
keyword[return] literal[int]
|
def pulse_train(time, start, duration, repeat_time, end):
""" Implements vensim's PULSE TRAIN function
In range [-inf, start) returns 0
In range [start + n * repeat_time, start + n * repeat_time + duration) return 1
In range [start + n * repeat_time + duration, start + (n+1) * repeat_time) return 0
"""
t = time()
if start <= t < end:
return 1 if (t - start) % repeat_time < duration else 0 # depends on [control=['if'], data=['start', 't']]
else:
return 0
|
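Since the Vensim-style function above takes time as a callable, a quick way to see the train shape is to sample the same expression directly; a minimal sketch with start=2, duration=1, repeat_time=5, end=12:

def pulse_value(t, start=2, duration=1, repeat_time=5, end=12):
    if start <= t < end:
        return 1 if (t - start) % repeat_time < duration else 0
    return 0

print([pulse_value(t) for t in range(14)])
# [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]  -> pulses at t=2 and t=7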
def _load_machines_cache(self):
"""This method should fill up `_machines_cache` from scratch.
It could happen only in two cases:
1. During class initialization
2. When all etcd members failed"""
self._update_machines_cache = True
if 'srv' not in self._config and 'host' not in self._config and 'hosts' not in self._config:
raise Exception('Neither srv, hosts, host nor url are defined in etcd section of config')
self._machines_cache = self._get_machines_cache_from_config()
# Can not bootstrap list of etcd-cluster members, giving up
if not self._machines_cache:
raise etcd.EtcdException
# After filling up initial list of machines_cache we should ask etcd-cluster about actual list
self._base_uri = self._next_server()
self._refresh_machines_cache()
self._update_machines_cache = False
self._machines_cache_updated = time.time()
|
def function[_load_machines_cache, parameter[self]]:
constant[This method should fill up `_machines_cache` from scratch.
It could happen only in two cases:
1. During class initialization
2. When all etcd members failed]
name[self]._update_machines_cache assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b2178be0> begin[:]
<ast.Raise object at 0x7da1b2179750>
name[self]._machines_cache assign[=] call[name[self]._get_machines_cache_from_config, parameter[]]
if <ast.UnaryOp object at 0x7da1b2179270> begin[:]
<ast.Raise object at 0x7da1b2178fd0>
name[self]._base_uri assign[=] call[name[self]._next_server, parameter[]]
call[name[self]._refresh_machines_cache, parameter[]]
name[self]._update_machines_cache assign[=] constant[False]
name[self]._machines_cache_updated assign[=] call[name[time].time, parameter[]]
|
keyword[def] identifier[_load_machines_cache] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_update_machines_cache] = keyword[True]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_config] keyword[and] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_config] keyword[and] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_config] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[self] . identifier[_machines_cache] = identifier[self] . identifier[_get_machines_cache_from_config] ()
keyword[if] keyword[not] identifier[self] . identifier[_machines_cache] :
keyword[raise] identifier[etcd] . identifier[EtcdException]
identifier[self] . identifier[_base_uri] = identifier[self] . identifier[_next_server] ()
identifier[self] . identifier[_refresh_machines_cache] ()
identifier[self] . identifier[_update_machines_cache] = keyword[False]
identifier[self] . identifier[_machines_cache_updated] = identifier[time] . identifier[time] ()
|
def _load_machines_cache(self):
"""This method should fill up `_machines_cache` from scratch.
It could happen only in two cases:
1. During class initialization
2. When all etcd members failed"""
self._update_machines_cache = True
if 'srv' not in self._config and 'host' not in self._config and ('hosts' not in self._config):
raise Exception('Neither srv, hosts, host nor url are defined in etcd section of config') # depends on [control=['if'], data=[]]
self._machines_cache = self._get_machines_cache_from_config()
# Can not bootstrap list of etcd-cluster members, giving up
if not self._machines_cache:
raise etcd.EtcdException # depends on [control=['if'], data=[]]
# After filling up initial list of machines_cache we should ask etcd-cluster about actual list
self._base_uri = self._next_server()
self._refresh_machines_cache()
self._update_machines_cache = False
self._machines_cache_updated = time.time()
|
def get_exiobase_files(path, coefficients=True):
""" Gets the EXIOBASE files in path (which can be a zip file)
Parameters
----------
path: str or pathlib.Path
Path to exiobase files or zip file
coefficients: boolean, optional
If True (default), considers the mrIot file as A matrix,
and the extensions as S matrices. Otherwise as Z and F, respectively
Returns
-------
dict of dict
"""
path = os.path.normpath(str(path))
if coefficients:
exio_core_regex = dict(
# don’t match file if starting with _
A=re.compile('(?<!\_)mrIot.*txt'),
Y=re.compile('(?<!\_)mrFinalDemand.*txt'),
S_factor_inputs=re.compile('(?<!\_)mrFactorInputs.*txt'),
S_emissions=re.compile('(?<!\_)mrEmissions.*txt'),
S_materials=re.compile('(?<!\_)mrMaterials.*txt'),
S_resources=re.compile('(?<!\_)mrResources.*txt'),
FY_resources=re.compile('(?<!\_)mrFDResources.*txt'),
FY_emissions=re.compile('(?<!\_)mrFDEmissions.*txt'),
FY_materials=re.compile('(?<!\_)mrFDMaterials.*txt'),
)
else:
exio_core_regex = dict(
# don’t match file if starting with _
Z=re.compile('(?<!\_)mrIot.*txt'),
Y=re.compile('(?<!\_)mrFinalDemand.*txt'),
F_fac=re.compile('(?<!\_)mrFactorInputs.*txt'),
F_emissions=re.compile('(?<!\_)mrEmissions.*txt'),
F_materials=re.compile('(?<!\_)mrMaterials.*txt'),
F_resources=re.compile('(?<!\_)mrResources.*txt'),
FY_emissions=re.compile('(?<!\_)mrFDEmissions.*txt'),
FY_materials=re.compile('(?<!\_)mrFDMaterials.*txt'),
)
repo_content = get_repo_content(path)
exio_files = dict()
for kk, vv in exio_core_regex.items():
found_file = [vv.search(ff).string for ff in repo_content.filelist
if vv.search(ff)]
if len(found_file) > 1:
logging.warning(
"Multiple files found for {}: {}"
" - USING THE FIRST ONE".format(kk, found_file))
found_file = found_file[0:1]
elif len(found_file) == 0:
continue
else:
if repo_content.iszip:
format_para = sniff_csv_format(found_file[0],
zip_file=path)
else:
format_para = sniff_csv_format(os.path.join(path,
found_file[0]))
exio_files[kk] = dict(
root_repo=path,
file_path=found_file[0],
version=get_exiobase12_version(
os.path.basename(found_file[0])),
index_rows=format_para['nr_header_row'],
index_col=format_para['nr_index_col'],
unit_col=format_para['nr_index_col'] - 1,
sep=format_para['sep'])
return exio_files
|
def function[get_exiobase_files, parameter[path, coefficients]]:
constant[ Gets the EXIOBASE files in path (which can be a zip file)
Parameters
----------
path: str or pathlib.Path
Path to exiobase files or zip file
coefficients: boolean, optional
If True (default), considers the mrIot file as A matrix,
and the extensions as S matrices. Otherwise as Z and F, respectively
Returns
-------
dict of dict
]
variable[path] assign[=] call[name[os].path.normpath, parameter[call[name[str], parameter[name[path]]]]]
if name[coefficients] begin[:]
variable[exio_core_regex] assign[=] call[name[dict], parameter[]]
variable[repo_content] assign[=] call[name[get_repo_content], parameter[name[path]]]
variable[exio_files] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b06984f0>, <ast.Name object at 0x7da1b0698430>]]] in starred[call[name[exio_core_regex].items, parameter[]]] begin[:]
variable[found_file] assign[=] <ast.ListComp object at 0x7da1b06980d0>
if compare[call[name[len], parameter[name[found_file]]] greater[>] constant[1]] begin[:]
call[name[logging].warning, parameter[call[constant[Multiple files found for {}: {} - USING THE FIRST ONE].format, parameter[name[kk], name[found_file]]]]]
variable[found_file] assign[=] call[name[found_file]][<ast.Slice object at 0x7da1b0402c50>]
return[name[exio_files]]
|
keyword[def] identifier[get_exiobase_files] ( identifier[path] , identifier[coefficients] = keyword[True] ):
literal[string]
identifier[path] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[str] ( identifier[path] ))
keyword[if] identifier[coefficients] :
identifier[exio_core_regex] = identifier[dict] (
identifier[A] = identifier[re] . identifier[compile] ( literal[string] ),
identifier[Y] = identifier[re] . identifier[compile] ( literal[string] ),
identifier[S_factor_inputs] = identifier[re] . identifier[compile] ( literal[string] ),
identifier[S_emissions] = identifier[re] . identifier[compile] ( literal[string] ),
identifier[S_materials] = identifier[re] . identifier[compile] ( literal[string] ),
identifier[S_resources] = identifier[re] . identifier[compile] ( literal[string] ),
identifier[FY_resources] = identifier[re] . identifier[compile] ( literal[string] ),
identifier[FY_emissions] = identifier[re] . identifier[compile] ( literal[string] ),
identifier[FY_materials] = identifier[re] . identifier[compile] ( literal[string] ),
)
keyword[else] :
identifier[exio_core_regex] = identifier[dict] (
identifier[Z] = identifier[re] . identifier[compile] ( literal[string] ),
identifier[Y] = identifier[re] . identifier[compile] ( literal[string] ),
identifier[F_fac] = identifier[re] . identifier[compile] ( literal[string] ),
identifier[F_emissions] = identifier[re] . identifier[compile] ( literal[string] ),
identifier[F_materials] = identifier[re] . identifier[compile] ( literal[string] ),
identifier[F_resources] = identifier[re] . identifier[compile] ( literal[string] ),
identifier[FY_emissions] = identifier[re] . identifier[compile] ( literal[string] ),
identifier[FY_materials] = identifier[re] . identifier[compile] ( literal[string] ),
)
identifier[repo_content] = identifier[get_repo_content] ( identifier[path] )
identifier[exio_files] = identifier[dict] ()
keyword[for] identifier[kk] , identifier[vv] keyword[in] identifier[exio_core_regex] . identifier[items] ():
identifier[found_file] =[ identifier[vv] . identifier[search] ( identifier[ff] ). identifier[string] keyword[for] identifier[ff] keyword[in] identifier[repo_content] . identifier[filelist]
keyword[if] identifier[vv] . identifier[search] ( identifier[ff] )]
keyword[if] identifier[len] ( identifier[found_file] )> literal[int] :
identifier[logging] . identifier[warning] (
literal[string]
literal[string] . identifier[format] ( identifier[kk] , identifier[found_file] ))
identifier[found_file] = identifier[found_file] [ literal[int] : literal[int] ]
keyword[elif] identifier[len] ( identifier[found_file] )== literal[int] :
keyword[continue]
keyword[else] :
keyword[if] identifier[repo_content] . identifier[iszip] :
identifier[format_para] = identifier[sniff_csv_format] ( identifier[found_file] [ literal[int] ],
identifier[zip_file] = identifier[path] )
keyword[else] :
identifier[format_para] = identifier[sniff_csv_format] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] ,
identifier[found_file] [ literal[int] ]))
identifier[exio_files] [ identifier[kk] ]= identifier[dict] (
identifier[root_repo] = identifier[path] ,
identifier[file_path] = identifier[found_file] [ literal[int] ],
identifier[version] = identifier[get_exiobase12_version] (
identifier[os] . identifier[path] . identifier[basename] ( identifier[found_file] [ literal[int] ])),
identifier[index_rows] = identifier[format_para] [ literal[string] ],
identifier[index_col] = identifier[format_para] [ literal[string] ],
identifier[unit_col] = identifier[format_para] [ literal[string] ]- literal[int] ,
identifier[sep] = identifier[format_para] [ literal[string] ])
keyword[return] identifier[exio_files]
|
def get_exiobase_files(path, coefficients=True):
""" Gets the EXIOBASE files in path (which can be a zip file)
Parameters
----------
path: str or pathlib.Path
Path to exiobase files or zip file
coefficients: boolean, optional
If True (default), considers the mrIot file as A matrix,
and the extensions as S matrices. Otherwise as Z and F, respectively
Returns
-------
dict of dict
"""
path = os.path.normpath(str(path))
if coefficients:
# don’t match file if starting with _
exio_core_regex = dict(A=re.compile('(?<!\\_)mrIot.*txt'), Y=re.compile('(?<!\\_)mrFinalDemand.*txt'), S_factor_inputs=re.compile('(?<!\\_)mrFactorInputs.*txt'), S_emissions=re.compile('(?<!\\_)mrEmissions.*txt'), S_materials=re.compile('(?<!\\_)mrMaterials.*txt'), S_resources=re.compile('(?<!\\_)mrResources.*txt'), FY_resources=re.compile('(?<!\\_)mrFDResources.*txt'), FY_emissions=re.compile('(?<!\\_)mrFDEmissions.*txt'), FY_materials=re.compile('(?<!\\_)mrFDMaterials.*txt')) # depends on [control=['if'], data=[]]
else:
# don’t match file if starting with _
exio_core_regex = dict(Z=re.compile('(?<!\\_)mrIot.*txt'), Y=re.compile('(?<!\\_)mrFinalDemand.*txt'), F_fac=re.compile('(?<!\\_)mrFactorInputs.*txt'), F_emissions=re.compile('(?<!\\_)mrEmissions.*txt'), F_materials=re.compile('(?<!\\_)mrMaterials.*txt'), F_resources=re.compile('(?<!\\_)mrResources.*txt'), FY_emissions=re.compile('(?<!\\_)mrFDEmissions.*txt'), FY_materials=re.compile('(?<!\\_)mrFDMaterials.*txt'))
repo_content = get_repo_content(path)
exio_files = dict()
for (kk, vv) in exio_core_regex.items():
found_file = [vv.search(ff).string for ff in repo_content.filelist if vv.search(ff)]
if len(found_file) > 1:
logging.warning('Multiple files found for {}: {} - USING THE FIRST ONE'.format(kk, found_file))
found_file = found_file[0:1] # depends on [control=['if'], data=[]]
elif len(found_file) == 0:
continue # depends on [control=['if'], data=[]]
else:
if repo_content.iszip:
format_para = sniff_csv_format(found_file[0], zip_file=path) # depends on [control=['if'], data=[]]
else:
format_para = sniff_csv_format(os.path.join(path, found_file[0]))
exio_files[kk] = dict(root_repo=path, file_path=found_file[0], version=get_exiobase12_version(os.path.basename(found_file[0])), index_rows=format_para['nr_header_row'], index_col=format_para['nr_index_col'], unit_col=format_para['nr_index_col'] - 1, sep=format_para['sep']) # depends on [control=['for'], data=[]]
return exio_files
|
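A hedged usage sketch for the helper above; the archive path is a placeholder, and it assumes the function and its pymrio helpers are importable in the current environment.

exio_files = get_exiobase_files('/data/exiobase_2_2_2.zip',  # placeholder path
                                coefficients=True)
for name, meta in exio_files.items():
    # e.g. A  mrIot_version2.2.2.txt  2.2.2  '\t'
    print(name, meta['file_path'], meta['version'], repr(meta['sep']))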
def addSettingsMenu(menuName, parentMenuFunction=None):
'''
Adds a 'open settings...' menu to the plugin menu.
This method should be called from the initGui() method of the plugin
:param menuName: The name of the plugin menu in which the settings menu is to be added
:param parentMenuFunction: a function from QgisInterface to indicate where to put the container plugin menu.
If not passed, it uses addPluginToMenu
'''
parentMenuFunction = parentMenuFunction or iface.addPluginToMenu
namespace = _callerName().split(".")[0]
settingsAction = QAction(
QgsApplication.getThemeIcon('/mActionOptions.svg'),
"Plugin Settings...",
iface.mainWindow())
settingsAction.setObjectName(namespace + "settings")
settingsAction.triggered.connect(lambda: openSettingsDialog(namespace))
parentMenuFunction(menuName, settingsAction)
global _settingActions
_settingActions[menuName] = settingsAction
|
def function[addSettingsMenu, parameter[menuName, parentMenuFunction]]:
constant[
Adds a 'open settings...' menu to the plugin menu.
This method should be called from the initGui() method of the plugin
:param menuName: The name of the plugin menu in which the settings menu is to be added
:param parentMenuFunction: a function from QgisInterface to indicate where to put the container plugin menu.
If not passed, it uses addPluginToMenu
]
variable[parentMenuFunction] assign[=] <ast.BoolOp object at 0x7da1b0fc4910>
variable[namespace] assign[=] call[call[call[name[_callerName], parameter[]].split, parameter[constant[.]]]][constant[0]]
variable[settingsAction] assign[=] call[name[QAction], parameter[call[name[QgsApplication].getThemeIcon, parameter[constant[/mActionOptions.svg]]], constant[Plugin Settings...], call[name[iface].mainWindow, parameter[]]]]
call[name[settingsAction].setObjectName, parameter[binary_operation[name[namespace] + constant[settings]]]]
call[name[settingsAction].triggered.connect, parameter[<ast.Lambda object at 0x7da1b0fc41c0>]]
call[name[parentMenuFunction], parameter[name[menuName], name[settingsAction]]]
<ast.Global object at 0x7da1b0fc5810>
call[name[_settingActions]][name[menuName]] assign[=] name[settingsAction]
|
keyword[def] identifier[addSettingsMenu] ( identifier[menuName] , identifier[parentMenuFunction] = keyword[None] ):
literal[string]
identifier[parentMenuFunction] = identifier[parentMenuFunction] keyword[or] identifier[iface] . identifier[addPluginToMenu]
identifier[namespace] = identifier[_callerName] (). identifier[split] ( literal[string] )[ literal[int] ]
identifier[settingsAction] = identifier[QAction] (
identifier[QgsApplication] . identifier[getThemeIcon] ( literal[string] ),
literal[string] ,
identifier[iface] . identifier[mainWindow] ())
identifier[settingsAction] . identifier[setObjectName] ( identifier[namespace] + literal[string] )
identifier[settingsAction] . identifier[triggered] . identifier[connect] ( keyword[lambda] : identifier[openSettingsDialog] ( identifier[namespace] ))
identifier[parentMenuFunction] ( identifier[menuName] , identifier[settingsAction] )
keyword[global] identifier[_settingActions]
identifier[_settingActions] [ identifier[menuName] ]= identifier[settingsAction]
|
def addSettingsMenu(menuName, parentMenuFunction=None):
"""
Adds a 'open settings...' menu to the plugin menu.
This method should be called from the initGui() method of the plugin
:param menuName: The name of the plugin menu in which the settings menu is to be added
:param parentMenuFunction: a function from QgisInterface to indicate where to put the container plugin menu.
If not passed, it uses addPluginToMenu
"""
parentMenuFunction = parentMenuFunction or iface.addPluginToMenu
namespace = _callerName().split('.')[0]
settingsAction = QAction(QgsApplication.getThemeIcon('/mActionOptions.svg'), 'Plugin Settings...', iface.mainWindow())
settingsAction.setObjectName(namespace + 'settings')
settingsAction.triggered.connect(lambda : openSettingsDialog(namespace))
parentMenuFunction(menuName, settingsAction)
global _settingActions
_settingActions[menuName] = settingsAction
|
def get_stocks(self, symbols: List[str]) -> List[Commodity]:
""" loads stocks by symbol """
query = (
self.query
.filter(Commodity.mnemonic.in_(symbols))
).order_by(Commodity.namespace, Commodity.mnemonic)
return query.all()
|
def function[get_stocks, parameter[self, symbols]]:
constant[ loads stocks by symbol ]
variable[query] assign[=] call[call[name[self].query.filter, parameter[call[name[Commodity].mnemonic.in_, parameter[name[symbols]]]]].order_by, parameter[name[Commodity].namespace, name[Commodity].mnemonic]]
return[call[name[query].all, parameter[]]]
|
keyword[def] identifier[get_stocks] ( identifier[self] , identifier[symbols] : identifier[List] [ identifier[str] ])-> identifier[List] [ identifier[Commodity] ]:
literal[string]
identifier[query] =(
identifier[self] . identifier[query]
. identifier[filter] ( identifier[Commodity] . identifier[mnemonic] . identifier[in_] ( identifier[symbols] ))
). identifier[order_by] ( identifier[Commodity] . identifier[namespace] , identifier[Commodity] . identifier[mnemonic] )
keyword[return] identifier[query] . identifier[all] ()
|
def get_stocks(self, symbols: List[str]) -> List[Commodity]:
""" loads stocks by symbol """
query = self.query.filter(Commodity.mnemonic.in_(symbols)).order_by(Commodity.namespace, Commodity.mnemonic)
return query.all()
|
def template_instances(cls, dataset, capacity=0):
"""
Uses the Instances as template to create an empty dataset.
:param dataset: the original dataset
:type dataset: Instances
:param capacity: how many data rows to reserve initially (see compactify)
:type capacity: int
:return: the empty dataset
:rtype: Instances
"""
return Instances(
javabridge.make_instance(
"weka/core/Instances", "(Lweka/core/Instances;I)V", dataset.jobject, capacity))
|
def function[template_instances, parameter[cls, dataset, capacity]]:
constant[
Uses the Instances as template to create an empty dataset.
:param dataset: the original dataset
:type dataset: Instances
:param capacity: how many data rows to reserve initially (see compactify)
:type capacity: int
:return: the empty dataset
:rtype: Instances
]
return[call[name[Instances], parameter[call[name[javabridge].make_instance, parameter[constant[weka/core/Instances], constant[(Lweka/core/Instances;I)V], name[dataset].jobject, name[capacity]]]]]]
|
keyword[def] identifier[template_instances] ( identifier[cls] , identifier[dataset] , identifier[capacity] = literal[int] ):
literal[string]
keyword[return] identifier[Instances] (
identifier[javabridge] . identifier[make_instance] (
literal[string] , literal[string] , identifier[dataset] . identifier[jobject] , identifier[capacity] ))
|
def template_instances(cls, dataset, capacity=0):
"""
Uses the Instances as template to create an empty dataset.
:param dataset: the original dataset
:type dataset: Instances
:param capacity: how many data rows to reserve initially (see compactify)
:type capacity: int
:return: the empty dataset
:rtype: Instances
"""
return Instances(javabridge.make_instance('weka/core/Instances', '(Lweka/core/Instances;I)V', dataset.jobject, capacity))
|
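A hedged usage sketch with python-weka-wrapper; it assumes a running JVM and an ARFF file at a placeholder path. The templated dataset keeps the attribute header of the source data but starts with no rows.

import weka.core.jvm as jvm
from weka.core.converters import Loader
from weka.core.dataset import Instances

jvm.start()
loader = Loader(classname="weka.core.converters.ArffLoader")
data = loader.load_file("/path/to/iris.arff")          # placeholder path
empty = Instances.template_instances(data, capacity=10)
print(empty.num_attributes, empty.num_instances)       # same attributes, 0 rows
jvm.stop()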
def firmware_autoupgrade_params_protocol(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware = ET.SubElement(config, "firmware", xmlns="urn:brocade.com:mgmt:brocade-firmware")
autoupgrade_params = ET.SubElement(firmware, "autoupgrade-params")
protocol = ET.SubElement(autoupgrade_params, "protocol")
protocol.text = kwargs.pop('protocol')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
def function[firmware_autoupgrade_params_protocol, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[firmware] assign[=] call[name[ET].SubElement, parameter[name[config], constant[firmware]]]
variable[autoupgrade_params] assign[=] call[name[ET].SubElement, parameter[name[firmware], constant[autoupgrade-params]]]
variable[protocol] assign[=] call[name[ET].SubElement, parameter[name[autoupgrade_params], constant[protocol]]]
name[protocol].text assign[=] call[name[kwargs].pop, parameter[constant[protocol]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]]
|
keyword[def] identifier[firmware_autoupgrade_params_protocol] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[firmware] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[autoupgrade_params] = identifier[ET] . identifier[SubElement] ( identifier[firmware] , literal[string] )
identifier[protocol] = identifier[ET] . identifier[SubElement] ( identifier[autoupgrade_params] , literal[string] )
identifier[protocol] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] )
|
def firmware_autoupgrade_params_protocol(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
firmware = ET.SubElement(config, 'firmware', xmlns='urn:brocade.com:mgmt:brocade-firmware')
autoupgrade_params = ET.SubElement(firmware, 'autoupgrade-params')
protocol = ET.SubElement(autoupgrade_params, 'protocol')
protocol.text = kwargs.pop('protocol')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
def getRelationships(self, pid, subject=None, predicate=None, format=None):
'''Get information about relationships on an object.
Wrapper function for
`Fedora REST API getRelationships <https://wiki.duraspace.org/display/FEDORA34/REST+API#RESTAPI-getRelationships>`_
:param pid: object pid
:param subject: subject (optional)
:param predicate: predicate (optional)
:param format: format
:rtype: :class:`requests.models.Response`
'''
http_args = {}
if subject is not None:
http_args['subject'] = subject
if predicate is not None:
http_args['predicate'] = predicate
if format is not None:
http_args['format'] = format
url = 'objects/%(pid)s/relationships' % {'pid': pid}
return self.get(url, params=http_args)
|
def function[getRelationships, parameter[self, pid, subject, predicate, format]]:
constant[Get information about relationships on an object.
Wrapper function for
`Fedora REST API getRelationships <https://wiki.duraspace.org/display/FEDORA34/REST+API#RESTAPI-getRelationships>`_
:param pid: object pid
:param subject: subject (optional)
:param predicate: predicate (optional)
:param format: format
:rtype: :class:`requests.models.Response`
]
variable[http_args] assign[=] dictionary[[], []]
if compare[name[subject] is_not constant[None]] begin[:]
call[name[http_args]][constant[subject]] assign[=] name[subject]
if compare[name[predicate] is_not constant[None]] begin[:]
call[name[http_args]][constant[predicate]] assign[=] name[predicate]
if compare[name[format] is_not constant[None]] begin[:]
call[name[http_args]][constant[format]] assign[=] name[format]
variable[url] assign[=] binary_operation[constant[objects/%(pid)s/relationships] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b26e3070>], [<ast.Name object at 0x7da1b26e3ca0>]]]
return[call[name[self].get, parameter[name[url]]]]
|
keyword[def] identifier[getRelationships] ( identifier[self] , identifier[pid] , identifier[subject] = keyword[None] , identifier[predicate] = keyword[None] , identifier[format] = keyword[None] ):
literal[string]
identifier[http_args] ={}
keyword[if] identifier[subject] keyword[is] keyword[not] keyword[None] :
identifier[http_args] [ literal[string] ]= identifier[subject]
keyword[if] identifier[predicate] keyword[is] keyword[not] keyword[None] :
identifier[http_args] [ literal[string] ]= identifier[predicate]
keyword[if] identifier[format] keyword[is] keyword[not] keyword[None] :
identifier[http_args] [ literal[string] ]= identifier[format]
identifier[url] = literal[string] %{ literal[string] : identifier[pid] }
keyword[return] identifier[self] . identifier[get] ( identifier[url] , identifier[params] = identifier[http_args] )
|
def getRelationships(self, pid, subject=None, predicate=None, format=None):
"""Get information about relationships on an object.
Wrapper function for
`Fedora REST API getRelationships <https://wiki.duraspace.org/display/FEDORA34/REST+API#RESTAPI-getRelationships>`_
:param pid: object pid
:param subject: subject (optional)
:param predicate: predicate (optional)
:param format: format
:rtype: :class:`requests.models.Response`
"""
http_args = {}
if subject is not None:
http_args['subject'] = subject # depends on [control=['if'], data=['subject']]
if predicate is not None:
http_args['predicate'] = predicate # depends on [control=['if'], data=['predicate']]
if format is not None:
http_args['format'] = format # depends on [control=['if'], data=['format']]
url = 'objects/%(pid)s/relationships' % {'pid': pid}
return self.get(url, params=http_args)
|
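A hedged usage sketch; api stands for an instance of the REST API wrapper the method above belongs to, and the pid, subject, and predicate values are placeholders.

# `api` is an instance of the wrapper class above (assumed, not constructed here).
response = api.getRelationships(
    'demo:1',
    subject='info:fedora/demo:1',
    predicate='info:fedora/fedora-system:def/relations-external#isMemberOf',
    format='n-triples')
print(response.status_code)
print(response.text)   # relationship triples in the requested format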
def get_subs(subs_file='subreddits.txt', blacklist_file='blacklist.txt') -> List[str]:
"""
Get subs based on a file of subreddits and a file of blacklisted subreddits.
:param subs_file: List of subreddits. Each sub in a new line.
:param blacklist_file: List of blacklisted subreddits. Each sub in a new line.
:return: List of subreddits filtered with the blacklisted subs.
**Example files**::
sub0
sub1
sub2
...
"""
# Get subs and blacklisted subs
subsf = open(subs_file)
blacklf = open(blacklist_file)
subs = [b.lower().replace('\n','') for b in subsf.readlines()]
blacklisted = [b.lower().replace('\n','') for b in blacklf.readlines()]
subsf.close()
blacklf.close()
# Filter blacklisted
subs_filtered = list(sorted(set(subs).difference(set(blacklisted))))
return subs_filtered
|
def function[get_subs, parameter[subs_file, blacklist_file]]:
constant[
Get subs based on a file of subreddits and a file of blacklisted subreddits.
:param subs_file: List of subreddits. Each sub in a new line.
:param blacklist_file: List of blacklisted subreddits. Each sub in a new line.
:return: List of subreddits filtered with the blacklisted subs.
**Example files**::
sub0
sub1
sub2
...
]
variable[subsf] assign[=] call[name[open], parameter[name[subs_file]]]
variable[blacklf] assign[=] call[name[open], parameter[name[blacklist_file]]]
variable[subs] assign[=] <ast.ListComp object at 0x7da1b1ec29e0>
variable[blacklisted] assign[=] <ast.ListComp object at 0x7da1b1ecb3d0>
call[name[subsf].close, parameter[]]
call[name[blacklf].close, parameter[]]
variable[subs_filtered] assign[=] call[name[list], parameter[call[name[sorted], parameter[call[call[name[set], parameter[name[subs]]].difference, parameter[call[name[set], parameter[name[blacklisted]]]]]]]]]
return[name[subs_filtered]]
|
keyword[def] identifier[get_subs] ( identifier[subs_file] = literal[string] , identifier[blacklist_file] = literal[string] )-> identifier[List] [ identifier[str] ]:
literal[string]
identifier[subsf] = identifier[open] ( identifier[subs_file] )
identifier[blacklf] = identifier[open] ( identifier[blacklist_file] )
identifier[subs] =[ identifier[b] . identifier[lower] (). identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[b] keyword[in] identifier[subsf] . identifier[readlines] ()]
identifier[blacklisted] =[ identifier[b] . identifier[lower] (). identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[b] keyword[in] identifier[blacklf] . identifier[readlines] ()]
identifier[subsf] . identifier[close] ()
identifier[blacklf] . identifier[close] ()
identifier[subs_filtered] = identifier[list] ( identifier[sorted] ( identifier[set] ( identifier[subs] ). identifier[difference] ( identifier[set] ( identifier[blacklisted] ))))
keyword[return] identifier[subs_filtered]
|
def get_subs(subs_file='subreddits.txt', blacklist_file='blacklist.txt') -> List[str]:
"""
Get subs based on a file of subreddits and a file of blacklisted subreddits.
:param subs_file: List of subreddits. Each sub in a new line.
:param blacklist_file: List of blacklisted subreddits. Each sub in a new line.
:return: List of subreddits filtered with the blacklisted subs.
**Example files**::
sub0
sub1
sub2
...
"""
# Get subs and blacklisted subs
subsf = open(subs_file)
blacklf = open(blacklist_file)
subs = [b.lower().replace('\n', '') for b in subsf.readlines()]
blacklisted = [b.lower().replace('\n', '') for b in blacklf.readlines()]
subsf.close()
blacklf.close()
# Filter blacklisted
subs_filtered = list(sorted(set(subs).difference(set(blacklisted))))
return subs_filtered
|
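A self-contained sketch of the expected file layout and the resulting filter, using the function above:

with open('subreddits.txt', 'w') as f:
    f.write('Python\nlearnpython\nprogramming\n')
with open('blacklist.txt', 'w') as f:
    f.write('programming\n')

print(get_subs('subreddits.txt', 'blacklist.txt'))
# ['learnpython', 'python']   (lowercased, blacklisted subs removed, sorted)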
def community_topic_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/topics#show-topic"
api_path = "/api/v2/community/topics/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs)
|
def function[community_topic_show, parameter[self, id]]:
constant[https://developer.zendesk.com/rest_api/docs/help_center/topics#show-topic]
variable[api_path] assign[=] constant[/api/v2/community/topics/{id}.json]
variable[api_path] assign[=] call[name[api_path].format, parameter[]]
return[call[name[self].call, parameter[name[api_path]]]]
|
keyword[def] identifier[community_topic_show] ( identifier[self] , identifier[id] ,** identifier[kwargs] ):
literal[string]
identifier[api_path] = literal[string]
identifier[api_path] = identifier[api_path] . identifier[format] ( identifier[id] = identifier[id] )
keyword[return] identifier[self] . identifier[call] ( identifier[api_path] ,** identifier[kwargs] )
|
def community_topic_show(self, id, **kwargs):
"""https://developer.zendesk.com/rest_api/docs/help_center/topics#show-topic"""
api_path = '/api/v2/community/topics/{id}.json'
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs)
|
def graph_easy(self):
"""Draw ascii diagram. graph-easy perl module require
"""
if not os.path.isfile("/usr/bin/graph-easy"):
print("Require 'graph-easy': Install with 'slpkg -s sbo "
"graph-easy'")
self.remove_dot()
raise SystemExit()
subprocess.call("graph-easy {0}.dot".format(self.image), shell=True)
self.remove_dot()
raise SystemExit()
|
def function[graph_easy, parameter[self]]:
constant[Draw ascii diagram. graph-easy perl module require
]
if <ast.UnaryOp object at 0x7da2044c0370> begin[:]
call[name[print], parameter[constant[Require 'graph-easy': Install with 'slpkg -s sbo graph-easy']]]
call[name[self].remove_dot, parameter[]]
<ast.Raise object at 0x7da2044c2170>
call[name[subprocess].call, parameter[call[constant[graph-easy {0}.dot].format, parameter[name[self].image]]]]
call[name[self].remove_dot, parameter[]]
<ast.Raise object at 0x7da2044c2590>
|
keyword[def] identifier[graph_easy] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( literal[string] ):
identifier[print] ( literal[string]
literal[string] )
identifier[self] . identifier[remove_dot] ()
keyword[raise] identifier[SystemExit] ()
identifier[subprocess] . identifier[call] ( literal[string] . identifier[format] ( identifier[self] . identifier[image] ), identifier[shell] = keyword[True] )
identifier[self] . identifier[remove_dot] ()
keyword[raise] identifier[SystemExit] ()
|
def graph_easy(self):
"""Draw ascii diagram. graph-easy perl module require
"""
if not os.path.isfile('/usr/bin/graph-easy'):
print("Require 'graph-easy': Install with 'slpkg -s sbo graph-easy'")
self.remove_dot()
raise SystemExit() # depends on [control=['if'], data=[]]
subprocess.call('graph-easy {0}.dot'.format(self.image), shell=True)
self.remove_dot()
raise SystemExit()
|