text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def write_extent(self):
"""After the extent selection, save the extent and disconnect signals.
"""
self.extent_dialog.accept()
self.extent_dialog.clear_extent.disconnect(
self.parent.dock.extent.clear_user_analysis_extent)
self.extent_dialog.extent_defined.disconnect(
self.parent.dock.define_user_analysis_extent)
self.extent_dialog.capture_button.clicked.disconnect(
self.start_capture_coordinates)
self.extent_dialog.tool.rectangle_created.disconnect(
self.stop_capture_coordinates) | [
"def",
"write_extent",
"(",
"self",
")",
":",
"self",
".",
"extent_dialog",
".",
"accept",
"(",
")",
"self",
".",
"extent_dialog",
".",
"clear_extent",
".",
"disconnect",
"(",
"self",
".",
"parent",
".",
"dock",
".",
"extent",
".",
"clear_user_analysis_exten... | 48.166667 | 11.083333 |
def unwrap(value):
"""Iterate an NTTable
:returns: An iterator yielding an OrderedDict for each column
"""
ret = []
# build lists of column names, and value
lbl, cols = [], []
for cname, cval in value.value.items():
lbl.append(cname)
cols.append(cval)
# zip together column arrays to iterate over rows
for rval in izip(*cols):
# zip together column names and row values
ret.append(OrderedDict(zip(lbl, rval)))
return ret | [
"def",
"unwrap",
"(",
"value",
")",
":",
"ret",
"=",
"[",
"]",
"# build lists of column names, and value",
"lbl",
",",
"cols",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"cname",
",",
"cval",
"in",
"value",
".",
"value",
".",
"items",
"(",
")",
":",
"lbl",... | 28.105263 | 18.842105 |
def read_excel(file_name, offset=1, sheet_index=0):
"""
读取 Excel
:param sheet_index:
:param file_name:
:param offset: 偏移,一般第一行是表头,不需要读取数据
:return:
"""
try:
workbook = xlrd.open_workbook(file_name)
except Exception as e:
return None
if len(workbook.sheets()) <= 0:
return []
sh = workbook.sheets()[sheet_index]
raw_data = []
n_rows = sh.nrows
row = sh.row_values(0)
header = []
for t in row:
t = t.strip().lower()
header.append(t)
# n_cols = sh.ncols
# 第0行是提示信息和标题,跳过
for i in range(offset, n_rows):
try:
row = sh.row_values(i)
d = {}
for j, t in enumerate(header):
d[t] = row[j]
raw_data.append(d)
except Exception as e:
pass
return raw_data | [
"def",
"read_excel",
"(",
"file_name",
",",
"offset",
"=",
"1",
",",
"sheet_index",
"=",
"0",
")",
":",
"try",
":",
"workbook",
"=",
"xlrd",
".",
"open_workbook",
"(",
"file_name",
")",
"except",
"Exception",
"as",
"e",
":",
"return",
"None",
"if",
"le... | 21.025641 | 18.358974 |
def _traverse_parent_objs(self, goobj_child):
"""Traverse from source GO up parents."""
child_id = goobj_child.id
# mark child as seen
self.seen_cids.add(child_id)
self.godag.go2obj[child_id] = goobj_child
# Loop through parents of child object
for parent_obj in goobj_child.parents:
parent_id = parent_obj.id
self.godag.p_from_cs[parent_id].add(child_id)
# If parent has not been seen, traverse
if parent_id not in self.seen_cids:
self._traverse_parent_objs(parent_obj) | [
"def",
"_traverse_parent_objs",
"(",
"self",
",",
"goobj_child",
")",
":",
"child_id",
"=",
"goobj_child",
".",
"id",
"# mark child as seen",
"self",
".",
"seen_cids",
".",
"add",
"(",
"child_id",
")",
"self",
".",
"godag",
".",
"go2obj",
"[",
"child_id",
"]... | 44.461538 | 7.769231 |
def app_list(**kwargs):
"""
Show uploaded applications.
"""
ctx = Context(**kwargs)
ctx.execute_action('app:list', **{
'storage': ctx.repo.create_secure_service('storage'),
}) | [
"def",
"app_list",
"(",
"*",
"*",
"kwargs",
")",
":",
"ctx",
"=",
"Context",
"(",
"*",
"*",
"kwargs",
")",
"ctx",
".",
"execute_action",
"(",
"'app:list'",
",",
"*",
"*",
"{",
"'storage'",
":",
"ctx",
".",
"repo",
".",
"create_secure_service",
"(",
"... | 25 | 12 |
def pretty(self, start, end, e, messages=None):
"""Pretties up the output error message so it is readable
and designates where the error came from"""
log.debug("Displaying document from lines '%i' to '%i'", start, end)
errorlist = []
if len(e.context) > 0:
errorlist = e.context
else:
errorlist.append(e)
for error in errorlist:
validator = error.validator
if validator == "required":
# Handle required fields
msg = error.message
messages.append("Between lines %d - %d. %s" % (start, end, msg))
elif validator == "additionalProperties":
# Handle additional properties not allowed
if len(error.message) > 256:
msg = error.message[:253] + "..."
else:
msg = error.message
messages.append("Between lines %d - %d. %s" % (start, end, msg))
elif len(error.relative_path) > 0:
# Handle other cases where we can loop through the lines
# get the JSON path to traverse through the file
jsonpath = error.relative_path
array_index = 0
current_start = start
foundline = 0
found = False
context = collections.deque(maxlen=20)
tag = " <<<<<<<<< Expects: %s <<<<<<<<<\n"""
for cnt, path in enumerate(error.relative_path):
# Need to set the key we are looking, and then check the array count
# if it is an array, we have some interesting checks to do
if int(cnt) % 2 == 0:
# we know we have some array account
# array_index keeps track of the array count we are looking for or number
# of matches we need to skip over before we get to the one we care about
# check if previous array_index > 0. if so, then we know we need to use
# that one to track down the specific instance of this nested key.
# later on, we utilize this array_index loop through
# if array_index == 0:
array_index = jsonpath[cnt]
match_count = 0
continue
elif int(cnt) % 2 == 1:
# we know we have some key name
# current_key keeps track of the key we are looking for in the JSON Path
current_key = jsonpath[cnt]
for linenum in range(current_start, end):
line = linecache.getline(self.ymlfile, linenum)
# Check if line contains the error
if ":" in line:
l = line.split(':')
key = l[0]
value = ':'.join(l[1:])
# TODO:
# Handle maxItems TBD
# Handle minItems TBD
# Handle in-order (bytes) TBD
# Handle uniqueness TBD
# Handle cases where key in yml file is hexadecimal
try:
key = int(key.strip(), 16)
except ValueError:
key = key.strip()
if str(key) == current_key:
# check if we are at our match_count and end of the path
if match_count == array_index:
# check if we are at end of the jsonpath
if cnt == len(jsonpath)-1:
# we are at the end of path so let's stop here'
if error.validator == "type":
if value.strip() == str(error.instance):
errormsg = "Value '%s' should be of type '%s'" % (error.instance, str(error.validator_value))
line = line.replace("\n", (tag % errormsg))
foundline = linenum
found = True
elif value.strip() == "" and error.instance is None:
errormsg = "Missing value for %s." % key
line = line.replace("\n", (tag % errormsg))
foundline = linenum
found = True
elif not found:
# print "EXTRA FOO"
# print match_count
# print array_index
# print current_key
# print line
# otherwise change the start to the current line
current_start = linenum
break
match_count += 1
# for the context queue, we want to get the error to appear in
# the middle of the error output. to do so, we will only append
# to the queue in 2 cases:
#
# 1. before we find the error (found == False). we can
# just keep pushing on the queue until we find it in the YAML.
# 2. once we find the error (found == True), we just want to push
# onto the queue until the the line is in the middle
if not found or (found and context.maxlen > (linenum-foundline)*2):
context.append(line)
elif found and context.maxlen <= (linenum-foundline)*2:
break
# Loop through the queue and generate a readable msg output
out = ""
for line in context:
out += line
if foundline:
msg = "Error found on line %d in %s:\n\n%s" % (foundline, self.ymlfile, out)
messages.append(msg)
# reset the line it was found on and the context
foundline = 0
context.clear()
linecache.clearcache()
else:
messages.append(error.message) | [
"def",
"pretty",
"(",
"self",
",",
"start",
",",
"end",
",",
"e",
",",
"messages",
"=",
"None",
")",
":",
"log",
".",
"debug",
"(",
"\"Displaying document from lines '%i' to '%i'\"",
",",
"start",
",",
"end",
")",
"errorlist",
"=",
"[",
"]",
"if",
"len",... | 47.806897 | 24.572414 |
def add_record(post_id, catalog_id, order=0):
'''
Create the record of post 2 tag, and update the count in g_tag.
'''
rec = MPost2Catalog.__get_by_info(post_id, catalog_id)
if rec:
entry = TabPost2Tag.update(
order=order,
# For migration. the value should be added when created.
par_id=rec.tag_id[:2] + '00',
).where(TabPost2Tag.uid == rec.uid)
entry.execute()
else:
TabPost2Tag.create(
uid=tools.get_uuid(),
par_id=catalog_id[:2] + '00',
post_id=post_id,
tag_id=catalog_id,
order=order,
)
MCategory.update_count(catalog_id) | [
"def",
"add_record",
"(",
"post_id",
",",
"catalog_id",
",",
"order",
"=",
"0",
")",
":",
"rec",
"=",
"MPost2Catalog",
".",
"__get_by_info",
"(",
"post_id",
",",
"catalog_id",
")",
"if",
"rec",
":",
"entry",
"=",
"TabPost2Tag",
".",
"update",
"(",
"order... | 32.521739 | 16.956522 |
def DbGetHostList(self, argin):
""" Get host list with name matching the specified filter
:param argin: The filter
:type: tango.DevString
:return: Host name list
:rtype: tango.DevVarStringArray """
self._log.debug("In DbGetHostList()")
argin = replace_wildcard(argin)
return self.db.get_host_list(argin) | [
"def",
"DbGetHostList",
"(",
"self",
",",
"argin",
")",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"In DbGetHostList()\"",
")",
"argin",
"=",
"replace_wildcard",
"(",
"argin",
")",
"return",
"self",
".",
"db",
".",
"get_host_list",
"(",
"argin",
")"
] | 35.9 | 8.5 |
def process_args():
"""
Parse command-line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-I', type=str,
metavar='<Include directory>',
action='append',
help='Directory to be searched for included files')
parser.add_argument('lems_file', type=str, metavar='<LEMS file>',
help='LEMS file to be simulated')
parser.add_argument('-nogui',
action='store_true',
help="If this is specified, just parse & simulate the model, but don't show any plots")
parser.add_argument('-dlems',
action='store_true',
help="If this is specified, export the LEMS file as "+dlems_info)
return parser.parse_args() | [
"def",
"process_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'-I'",
",",
"type",
"=",
"str",
",",
"metavar",
"=",
"'<Include directory>'",
",",
"action",
"=",
"'append'",
",",
"help"... | 38.5 | 17.458333 |
def hash(self):
"signatures are non deterministic"
if self.sender is None:
raise MissingSignatureError()
class HashSerializable(rlp.Serializable):
fields = [(field, sedes) for field, sedes in self.fields
if field not in ('v', 'r', 's')] + [('_sender', binary)]
_sedes = None
return sha3(rlp.encode(self, HashSerializable)) | [
"def",
"hash",
"(",
"self",
")",
":",
"if",
"self",
".",
"sender",
"is",
"None",
":",
"raise",
"MissingSignatureError",
"(",
")",
"class",
"HashSerializable",
"(",
"rlp",
".",
"Serializable",
")",
":",
"fields",
"=",
"[",
"(",
"field",
",",
"sedes",
")... | 40.4 | 18.2 |
def clean_meta(self, meta):
"""Removes unwanted metadata
Parameters
----------
meta : dict
Notebook metadata.
"""
if not self.verbose_metadata:
default_kernel_name = (self.default_kernel_name or
self._km.kernel_name)
if (meta.get("kernelspec", {})
.get("name", None) == default_kernel_name):
del meta["kernelspec"]
meta.pop("language_info", None)
return meta | [
"def",
"clean_meta",
"(",
"self",
",",
"meta",
")",
":",
"if",
"not",
"self",
".",
"verbose_metadata",
":",
"default_kernel_name",
"=",
"(",
"self",
".",
"default_kernel_name",
"or",
"self",
".",
"_km",
".",
"kernel_name",
")",
"if",
"(",
"meta",
".",
"g... | 27.526316 | 18.105263 |
def from_section(cls, config, section, **kwargs):
"""
Creates a :class:`~vsgen.project.VSGProject` from a :class:`~configparser.ConfigParser` section.
:param ConfigParser config: A :class:`~configparser.ConfigParser` instance.
:param str section: A :class:`~configparser.ConfigParser` section key.
:param kwargs: List of additional keyworded arguments to be passed into the :class:`~vsgen.project.VSGProject`.
:return: A valid :class:`~vsgen.project.VSGProject` instance if succesful; None otherwise.
"""
p = cls(**kwargs)
p.Name = config.get(section, 'name', fallback=p.Name)
p.FileName = config.getfile(section, 'filename', fallback=p.FileName)
p.SearchPath = config.getdirs(section, 'search_path', fallback=p.SearchPath)
p.OutputPath = config.getdir(section, 'output_path', fallback=p.OutputPath)
p.WorkingDirectory = config.getdir(section, 'working_directory', fallback=p.WorkingDirectory)
p.RootNamespace = config.get(section, 'root_namespace', fallback=p.RootNamespace)
p.ProjectHome = config.getdir(section, 'project_home', fallback=p.ProjectHome)
p.StartupFile = config.getfile(section, 'startup_file', fallback=p.StartupFile)
p.CompileFiles = config.getlist(section, 'compile_files', fallback=p.CompileFiles)
p.ContentFiles = config.getlist(section, 'content_files', fallback=p.ContentFiles)
p.CompileInFilter = config.getlist(section, 'compile_in_filter', fallback=p.CompileInFilter)
p.CompileExFilter = config.getlist(section, 'compile_ex_filter', fallback=p.CompileExFilter)
p.ContentInFilter = config.getlist(section, 'content_in_filter', fallback=p.ContentInFilter)
p.ContentExFilter = config.getlist(section, 'content_ex_filter', fallback=p.ContentExFilter)
p.DirectoryInFilter = config.getlist(section, 'directory_in_filter', fallback=p.DirectoryInFilter)
p.DirectoryExFilter = config.getlist(section, 'directory_ex_filter', fallback=p.DirectoryExFilter)
root_path = config.get(section, 'root_path', fallback="")
p.insert_files(root_path)
return p | [
"def",
"from_section",
"(",
"cls",
",",
"config",
",",
"section",
",",
"*",
"*",
"kwargs",
")",
":",
"p",
"=",
"cls",
"(",
"*",
"*",
"kwargs",
")",
"p",
".",
"Name",
"=",
"config",
".",
"get",
"(",
"section",
",",
"'name'",
",",
"fallback",
"=",
... | 68.75 | 43.4375 |
def str_get(x, i):
"""Extract a character from each sample at the specified position from a string column.
Note that if the specified position is out of bound of the string sample, this method returns '', while pandas retunrs nan.
:param int i: The index location, at which to extract the character.
:returns: an expression containing the extracted characters.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.get(5)
Expression = str_get(text, 5)
Length: 5 dtype: str (expression)
---------------------------------
0 h
1 p
2 m
3
4
"""
x = _to_string_sequence(x)
if i == -1:
sl = x.slice_string_end(-1)
else:
sl = x.slice_string(i, i+1)
return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl) | [
"def",
"str_get",
"(",
"x",
",",
"i",
")",
":",
"x",
"=",
"_to_string_sequence",
"(",
"x",
")",
"if",
"i",
"==",
"-",
"1",
":",
"sl",
"=",
"x",
".",
"slice_string_end",
"(",
"-",
"1",
")",
"else",
":",
"sl",
"=",
"x",
".",
"slice_string",
"(",
... | 28.361111 | 25.083333 |
def desc(self):
"""Get a short description of the automation."""
# Auto Away (1) - Location - Enabled
active = 'inactive'
if self.is_active:
active = 'active'
return '{0} (ID: {1}) - {2} - {3}'.format(
self.name, self.automation_id, self.type, active) | [
"def",
"desc",
"(",
"self",
")",
":",
"# Auto Away (1) - Location - Enabled",
"active",
"=",
"'inactive'",
"if",
"self",
".",
"is_active",
":",
"active",
"=",
"'active'",
"return",
"'{0} (ID: {1}) - {2} - {3}'",
".",
"format",
"(",
"self",
".",
"name",
",",
"sel... | 34.222222 | 15.333333 |
def update_search_space(self, search_space):
"""
Update search space definition in tuner by search_space in parameters.
Will called when first setup experiemnt or update search space in WebUI.
Parameters
----------
search_space : dict
"""
self.json = search_space
search_space_instance = json2space(self.json)
rstate = np.random.RandomState()
trials = hp.Trials()
domain = hp.Domain(None, search_space_instance,
pass_expr_memo_ctrl=None)
algorithm = self._choose_tuner(self.algorithm_name)
self.rval = hp.FMinIter(algorithm, domain, trials,
max_evals=-1, rstate=rstate, verbose=0)
self.rval.catch_eval_exceptions = False | [
"def",
"update_search_space",
"(",
"self",
",",
"search_space",
")",
":",
"self",
".",
"json",
"=",
"search_space",
"search_space_instance",
"=",
"json2space",
"(",
"self",
".",
"json",
")",
"rstate",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
")",
... | 39.1 | 17.7 |
def population_analysis_summary_report(feature, parent):
"""Retrieve an HTML population analysis table report from a multi exposure
analysis.
"""
_ = feature, parent # NOQA
analysis_dir = get_analysis_dir(exposure_population['key'])
if analysis_dir:
return get_impact_report_as_string(analysis_dir)
return None | [
"def",
"population_analysis_summary_report",
"(",
"feature",
",",
"parent",
")",
":",
"_",
"=",
"feature",
",",
"parent",
"# NOQA",
"analysis_dir",
"=",
"get_analysis_dir",
"(",
"exposure_population",
"[",
"'key'",
"]",
")",
"if",
"analysis_dir",
":",
"return",
... | 37.666667 | 15.111111 |
def items2file(items, filename, encoding='utf-8', modifier='w'):
"""
json array to file, canonical json format
"""
with codecs.open(filename, modifier, encoding=encoding) as f:
for item in items:
f.write(u"{}\n".format(json.dumps(
item, ensure_ascii=False, sort_keys=True))) | [
"def",
"items2file",
"(",
"items",
",",
"filename",
",",
"encoding",
"=",
"'utf-8'",
",",
"modifier",
"=",
"'w'",
")",
":",
"with",
"codecs",
".",
"open",
"(",
"filename",
",",
"modifier",
",",
"encoding",
"=",
"encoding",
")",
"as",
"f",
":",
"for",
... | 40.375 | 12.125 |
def Nu_Ornatsky(Re, Pr_b, Pr_w, rho_w=None, rho_b=None):
r'''Calculates internal convection Nusselt number for turbulent vertical
upward flow in a pipe under supercritical conditions according to [1]_ as
shown in both [2]_ and [3]_.
.. math::
Nu_b = 0.023Re_b^{0.8}(\min(Pr_b, Pr_w))^{0.8}
\left(\frac{\rho_w}{\rho_b}\right)^{0.3}
Parameters
----------
Re : float
Reynolds number with bulk fluid properties, [-]
Pr_b : float
Prandtl number with bulk fluid properties, [-]
Pr_w : float
Prandtl number with wall fluid properties, [-]
rho_w : float, optional
Density at the wall temperature, [kg/m^3]
rho_b : float, optional
Density at the bulk temperature, [kg/m^3]
Returns
-------
Nu : float
Nusselt number with bulk fluid properties, [-]
Notes
-----
[2]_ ranked it thirteenth in the enhanced heat transfer
category, with a MAD of 19.8% and 11th in the normal heat transfer with a
MAD of 17.6%. [3]_ ranked it seventh on a combined database.
If the extra density information is not provided, it will not be used.
Examples
--------
>>> Nu_Ornatsky(1E5, 1.2, 1.5, 330, 290.)
276.63531150832307
References
----------
.. [1] Ornatsky A.P., Glushchenko, L.P., Siomin, E.T. (1970). The research
of temperature conditions of small diameter parallel tubes cooled by
water under supercritical pressures. In: Proceedings of the 4th
international heat transfer conference, Paris-Versailles, France.
Elsevier, Amsterdam, vol VI, Paper no. B, 8 November 1970
.. [2] Chen, Weiwei, Xiande Fang, Yu Xu, and Xianghui Su. "An Assessment of
Correlations of Forced Convection Heat Transfer to Water at
Supercritical Pressure." Annals of Nuclear Energy 76 (February 2015):
451-60. doi:10.1016/j.anucene.2014.10.027.
.. [3] Yu, Jiyang, Baoshan Jia, Dan Wu, and Daling Wang. "Optimization of
Heat Transfer Coefficient Correlation at Supercritical Pressure Using
Genetic Algorithms." Heat and Mass Transfer 45, no. 6 (January 8, 2009):
757-66. doi:10.1007/s00231-008-0475-4.
'''
Nu = 0.023*Re**0.8*min(Pr_b, Pr_w)**0.8
if rho_w and rho_b:
Nu *= (rho_w/rho_b)**0.3
return Nu | [
"def",
"Nu_Ornatsky",
"(",
"Re",
",",
"Pr_b",
",",
"Pr_w",
",",
"rho_w",
"=",
"None",
",",
"rho_b",
"=",
"None",
")",
":",
"Nu",
"=",
"0.023",
"*",
"Re",
"**",
"0.8",
"*",
"min",
"(",
"Pr_b",
",",
"Pr_w",
")",
"**",
"0.8",
"if",
"rho_w",
"and",... | 38.166667 | 25.233333 |
def _validate_options(cls, options):
"""Validate the mutually exclusive options.
Return `True` iff only zero or one of `BASE_ERROR_SELECTION_OPTIONS`
was selected.
"""
for opt1, opt2 in \
itertools.permutations(cls.BASE_ERROR_SELECTION_OPTIONS, 2):
if getattr(options, opt1) and getattr(options, opt2):
log.error('Cannot pass both {} and {}. They are '
'mutually exclusive.'.format(opt1, opt2))
return False
if options.convention and options.convention not in conventions:
log.error("Illegal convention '{}'. Possible conventions: {}"
.format(options.convention,
', '.join(conventions.keys())))
return False
return True | [
"def",
"_validate_options",
"(",
"cls",
",",
"options",
")",
":",
"for",
"opt1",
",",
"opt2",
"in",
"itertools",
".",
"permutations",
"(",
"cls",
".",
"BASE_ERROR_SELECTION_OPTIONS",
",",
"2",
")",
":",
"if",
"getattr",
"(",
"options",
",",
"opt1",
")",
... | 41.05 | 22.45 |
def deletePartials(self):
""" Delete any old partial uploads/downloads in path. """
if self.dryrun:
self._client.listPartials()
else:
self._client.deletePartials() | [
"def",
"deletePartials",
"(",
"self",
")",
":",
"if",
"self",
".",
"dryrun",
":",
"self",
".",
"_client",
".",
"listPartials",
"(",
")",
"else",
":",
"self",
".",
"_client",
".",
"deletePartials",
"(",
")"
] | 34.333333 | 10.166667 |
def do_OP_RIGHT(vm):
"""
>>> s = [b'abcdef', b'\\3']
>>> do_OP_RIGHT(s, require_minimal=True)
>>> print(s==[b'def'])
True
>>> s = [b'abcdef', b'\\0']
>>> do_OP_RIGHT(s, require_minimal=False)
>>> print(s==[b''])
True
"""
pos = vm.pop_nonnegative()
if pos > 0:
vm.append(vm.pop()[-pos:])
else:
vm.pop()
vm.append(b'') | [
"def",
"do_OP_RIGHT",
"(",
"vm",
")",
":",
"pos",
"=",
"vm",
".",
"pop_nonnegative",
"(",
")",
"if",
"pos",
">",
"0",
":",
"vm",
".",
"append",
"(",
"vm",
".",
"pop",
"(",
")",
"[",
"-",
"pos",
":",
"]",
")",
"else",
":",
"vm",
".",
"pop",
... | 22.117647 | 15.058824 |
def parse_paragraphs(self, markup):
""" Returns a list of paragraphs in the markup.
A paragraph has a title and multiple lines of plain text.
A paragraph might have parent and child paragraphs,
denoting subtitles or bigger chapters.
A paragraph might have links to additional articles.
Formats numbered lists by replacing # by 1.
Formats bulleted sublists like ** or *** with indentation.
"""
# Paragraphs to exclude.
refs = ["references", "notes", "notes and references", "external links", "further reading"]
exclude = ["see also", "media", "gallery", "related topics", "lists", "gallery", "images"]
exclude.extend(refs)
paragraphs = []
paragraph = WikipediaParagraph(self.title)
paragraph_data = ""
for chunk in markup.split("\n"):
# Strip each line of whitespace,
# unless it's a preformatted line (starts with a space).
if not chunk.startswith(" "):
chunk = chunk.strip()
# A title wrapped in "=", "==", "==="...
# denotes a new paragraphs section.
if chunk.startswith("="):
if paragraph.title.lower() in refs \
or (paragraph.parent and paragraph.parent.title.lower() in refs):
self.parse_paragraph_references(paragraph_data)
paragraph.extend(self.parse_paragraph(paragraph_data))
paragraphs.append(paragraph)
# Initialise a new paragraph.
# Create parent/child links to other paragraphs.
title = chunk.strip().strip("=")
title = self.plain(title)
paragraph = WikipediaParagraph(title)
paragraph.depth = self.parse_paragraph_heading_depth(chunk)
if paragraph.title.lower() not in exclude:
paragraph = self.connect_paragraph(paragraph, paragraphs)
paragraph_data = ""
# Underneath a title might be links to in-depth articles,
# e.g. Main articles: Computer program and Computer programming
# which in wiki markup would be {{main|Computer program|Computer programming}}
# The second line corrects" {{Main|Credit (finance)}} or {{Main|Usury}}".
elif re.search(re.compile("^{{main", re.I), chunk):
paragraph.main = [link.strip("} ") for link in chunk.split("|")[1:]]
paragraph.main = [re.sub(re.compile("}}.*?{{main", re.I), "", link)
for link in paragraph.main]
# At the bottom might be links to related articles,
# e.g. See also: Abundance of the chemical elements
# which in wiki markup would be {{see also|Abundance of the chemical elements}}
elif re.search(re.compile("^{{see {0,1}also", re.I), chunk):
paragraph.related = [link.strip("} ") for link in chunk.split("|")[1:]]
# Accumulate the data in this paragraph,
# we'll process it once a new paragraph starts.
else:
paragraph_data += chunk +"\n"
# Append the last paragraph.
if paragraph.title.lower() in refs \
or (paragraph.parent and paragraph.parent.title.lower() in refs):
self.parse_paragraph_references(paragraph_data)
paragraph.extend(self.parse_paragraph(paragraph_data))
paragraphs.append(paragraph)
# The "See also" paragraph is an enumeration of links
# which we already parsed so don't show them.
# We also did references, and other paragraphs are not that relevant.
paragraphs_exclude = []
for paragraph in paragraphs:
if paragraph.title.lower() not in exclude \
and not (paragraph.parent and paragraph.parent.title.lower() in exclude):
paragraphs_exclude.append(paragraph)
if len(paragraphs_exclude) == 1 and \
len(paragraphs_exclude[0]) == 0:
return []
return paragraphs_exclude | [
"def",
"parse_paragraphs",
"(",
"self",
",",
"markup",
")",
":",
"# Paragraphs to exclude.",
"refs",
"=",
"[",
"\"references\"",
",",
"\"notes\"",
",",
"\"notes and references\"",
",",
"\"external links\"",
",",
"\"further reading\"",
"]",
"exclude",
"=",
"[",
"\"se... | 46.461538 | 21.56044 |
def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""):
"""Dynamically add syntaxtical elements to query.
This functions adds syntactical elements to the query string, and
report title, based on the types and number of items added thus far.
Args:
flag_filt (bool): at least one filter item specified.
qry_string (str): portion of the query constructed thus far.
param_str (str): the title to display before the list.
flag_id (bool): optional - instance-id was specified.
filt_st (str): optional - syntax to add on end if filter specified.
Returns:
qry_string (str): the portion of the query that was passed in with
the appropriate syntactical elements added.
param_str (str): the title to display before the list.
"""
if flag_id or flag_filt:
qry_string += ", "
param_str += ", "
if not flag_filt:
qry_string += filt_st
return (qry_string, param_str) | [
"def",
"qry_helper",
"(",
"flag_id",
",",
"qry_string",
",",
"param_str",
",",
"flag_filt",
"=",
"False",
",",
"filt_st",
"=",
"\"\"",
")",
":",
"if",
"flag_id",
"or",
"flag_filt",
":",
"qry_string",
"+=",
"\", \"",
"param_str",
"+=",
"\", \"",
"if",
"not"... | 39.72 | 24.2 |
def from_headers(self, headers):
"""Generate a SpanContext object from B3 propagation headers.
:type headers: dict
:param headers: HTTP request headers.
:rtype: :class:`~opencensus.trace.span_context.SpanContext`
:returns: SpanContext generated from B3 propagation headers.
"""
if headers is None:
return SpanContext(from_header=False)
trace_id, span_id, sampled = None, None, None
state = headers.get(_STATE_HEADER_KEY)
if state:
fields = state.split('-', 4)
if len(fields) == 1:
sampled = fields[0]
elif len(fields) == 2:
trace_id, span_id = fields
elif len(fields) == 3:
trace_id, span_id, sampled = fields
elif len(fields) == 4:
trace_id, span_id, sampled, _parent_span_id = fields
else:
return SpanContext(from_header=False)
else:
trace_id = headers.get(_TRACE_ID_KEY)
span_id = headers.get(_SPAN_ID_KEY)
sampled = headers.get(_SAMPLED_KEY)
if sampled is not None:
# The specification encodes an enabled tracing decision as "1".
# In the wild pre-standard implementations might still send "true".
# "d" is set in the single header case when debugging is enabled.
sampled = sampled.lower() in ('1', 'd', 'true')
else:
# If there's no incoming sampling decision, it was deferred to us.
# Even though we set it to False here, we might still sample
# depending on the tracer configuration.
sampled = False
trace_options = TraceOptions()
trace_options.set_enabled(sampled)
# TraceId and SpanId headers both have to exist
if not trace_id or not span_id:
return SpanContext(trace_options=trace_options)
# Convert 64-bit trace ids to 128-bit
if len(trace_id) == 16:
trace_id = '0'*16 + trace_id
span_context = SpanContext(
trace_id=trace_id,
span_id=span_id,
trace_options=trace_options,
from_header=True
)
return span_context | [
"def",
"from_headers",
"(",
"self",
",",
"headers",
")",
":",
"if",
"headers",
"is",
"None",
":",
"return",
"SpanContext",
"(",
"from_header",
"=",
"False",
")",
"trace_id",
",",
"span_id",
",",
"sampled",
"=",
"None",
",",
"None",
",",
"None",
"state",
... | 35.174603 | 18.095238 |
def cached(fn, size=32):
''' this decorator creates a type safe lru_cache
around the decorated function. Unlike
functools.lru_cache, this will not crash when
unhashable arguments are passed to the function'''
assert callable(fn)
assert isinstance(size, int)
return overload(fn)(lru_cache(size, typed=True)(fn)) | [
"def",
"cached",
"(",
"fn",
",",
"size",
"=",
"32",
")",
":",
"assert",
"callable",
"(",
"fn",
")",
"assert",
"isinstance",
"(",
"size",
",",
"int",
")",
"return",
"overload",
"(",
"fn",
")",
"(",
"lru_cache",
"(",
"size",
",",
"typed",
"=",
"True"... | 41.375 | 11.625 |
def makeServoIDPacket(curr_id, new_id):
"""
Given the current ID, returns a packet to set the servo to a new ID
"""
pkt = Packet.makeWritePacket(curr_id, xl320.XL320_ID, [new_id])
return pkt | [
"def",
"makeServoIDPacket",
"(",
"curr_id",
",",
"new_id",
")",
":",
"pkt",
"=",
"Packet",
".",
"makeWritePacket",
"(",
"curr_id",
",",
"xl320",
".",
"XL320_ID",
",",
"[",
"new_id",
"]",
")",
"return",
"pkt"
] | 31.666667 | 13.666667 |
def add_elasticache_replication_group(self, replication_group, region):
''' Adds an ElastiCache replication group to the inventory and index '''
# Only want available clusters unless all_elasticache_replication_groups is True
if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
return
# Select the best destination address (PrimaryEndpoint)
dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
if not dest:
# Skip clusters we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, replication_group['ReplicationGroupId']]
# Inventory: Group by ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[replication_group['ReplicationGroupId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone (doesn't apply to replication groups)
# Inventory: Group by node type (doesn't apply to replication groups)
# Inventory: Group by VPC (information not available in the current
# AWS API version for replication groups
# Inventory: Group by security group (doesn't apply to replication groups)
# Check this value in cluster level
# Inventory: Group by engine (replication groups are always Redis)
if self.group_by_elasticache_engine:
self.push(self.inventory, 'elasticache_redis', dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', 'redis')
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
host_info = self.get_host_info_dict_from_describe_dict(replication_group)
self.inventory["_meta"]["hostvars"][dest] = host_info | [
"def",
"add_elasticache_replication_group",
"(",
"self",
",",
"replication_group",
",",
"region",
")",
":",
"# Only want available clusters unless all_elasticache_replication_groups is True",
"if",
"not",
"self",
".",
"all_elasticache_replication_groups",
"and",
"replication_group"... | 43.72549 | 30.196078 |
def pelix_infos(self):
"""
Basic information about the Pelix framework instance
"""
framework = self.__context.get_framework()
return {
"version": framework.get_version(),
"properties": framework.get_properties(),
} | [
"def",
"pelix_infos",
"(",
"self",
")",
":",
"framework",
"=",
"self",
".",
"__context",
".",
"get_framework",
"(",
")",
"return",
"{",
"\"version\"",
":",
"framework",
".",
"get_version",
"(",
")",
",",
"\"properties\"",
":",
"framework",
".",
"get_properti... | 31 | 13.666667 |
def timestamp_with_tzinfo(dt):
"""
Serialize a date/time value into an ISO8601 text representation
adjusted (if needed) to UTC timezone.
For instance:
>>> serialize_date(datetime(2012, 4, 10, 22, 38, 20, 604391))
'2012-04-10T22:38:20.604391Z'
"""
utc = tzutc()
if dt.tzinfo:
dt = dt.astimezone(utc).replace(tzinfo=None)
return dt.isoformat() + 'Z' | [
"def",
"timestamp_with_tzinfo",
"(",
"dt",
")",
":",
"utc",
"=",
"tzutc",
"(",
")",
"if",
"dt",
".",
"tzinfo",
":",
"dt",
"=",
"dt",
".",
"astimezone",
"(",
"utc",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"return",
"dt",
".",
"isoforma... | 27.428571 | 17.142857 |
def detect(self, text):
"""Detect language of the input text
:param text: The source text(s) whose language you want to identify.
Batch detection is supported via sequence input.
:type text: UTF-8 :class:`str`; :class:`unicode`; string sequence (list, tuple, iterator, generator)
:rtype: Detected
:rtype: :class:`list` (when a list is passed)
Basic usage:
>>> from googletrans import Translator
>>> translator = Translator()
>>> translator.detect('이 문장은 한글로 쓰여졌습니다.')
<Detected lang=ko confidence=0.27041003>
>>> translator.detect('この文章は日本語で書かれました。')
<Detected lang=ja confidence=0.64889508>
>>> translator.detect('This sentence is written in English.')
<Detected lang=en confidence=0.22348526>
>>> translator.detect('Tiu frazo estas skribita en Esperanto.')
<Detected lang=eo confidence=0.10538048>
Advanced usage:
>>> langs = translator.detect(['한국어', '日本語', 'English', 'le français'])
>>> for lang in langs:
... print(lang.lang, lang.confidence)
ko 1
ja 0.92929292
en 0.96954316
fr 0.043500196
"""
if isinstance(text, list):
result = []
for item in text:
lang = self.detect(item)
result.append(lang)
return result
data = self._translate(text, dest='en', src='auto')
# actual source language that will be recognized by Google Translator when the
# src passed is equal to auto.
src = ''
confidence = 0.0
try:
src = ''.join(data[8][0])
confidence = data[8][-2][0]
except Exception: # pragma: nocover
pass
result = Detected(lang=src, confidence=confidence)
return result | [
"def",
"detect",
"(",
"self",
",",
"text",
")",
":",
"if",
"isinstance",
"(",
"text",
",",
"list",
")",
":",
"result",
"=",
"[",
"]",
"for",
"item",
"in",
"text",
":",
"lang",
"=",
"self",
".",
"detect",
"(",
"item",
")",
"result",
".",
"append",... | 36.576923 | 19.942308 |
def on_setButton_pressed(self):
"""
Start recording a key combination when the user clicks on the setButton.
The button itself is automatically disabled during the recording process.
"""
self.keyLabel.setText("Press a key or combination...") # TODO: i18n
logger.debug("User starts to record a key combination.")
self.grabber = iomediator.KeyGrabber(self)
self.grabber.start() | [
"def",
"on_setButton_pressed",
"(",
"self",
")",
":",
"self",
".",
"keyLabel",
".",
"setText",
"(",
"\"Press a key or combination...\"",
")",
"# TODO: i18n",
"logger",
".",
"debug",
"(",
"\"User starts to record a key combination.\"",
")",
"self",
".",
"grabber",
"=",... | 48 | 19.111111 |
def execute(self, env, args):
""" Prints task information.
`env`
Runtime ``Environment`` instance.
`args`
Arguments object from arg parser.
"""
start = self._fuzzy_time_parse(args.start)
if not start:
raise errors.FocusError(u'Invalid start period provided')
stats = self._get_stats(env.task, start)
self._print_stats(env, stats) | [
"def",
"execute",
"(",
"self",
",",
"env",
",",
"args",
")",
":",
"start",
"=",
"self",
".",
"_fuzzy_time_parse",
"(",
"args",
".",
"start",
")",
"if",
"not",
"start",
":",
"raise",
"errors",
".",
"FocusError",
"(",
"u'Invalid start period provided'",
")",... | 29.2 | 17.533333 |
def loadMetadata(self):
""" #TODO: docstring """
#TODO: change that spectra dont have to be iterated to extract metadata
#node
if self._parsed:
raise TypeError('Mzml file already parsed.')
[None for _ in self._parseMzml()]
self._parsed = True | [
"def",
"loadMetadata",
"(",
"self",
")",
":",
"#TODO: change that spectra dont have to be iterated to extract metadata",
"#node",
"if",
"self",
".",
"_parsed",
":",
"raise",
"TypeError",
"(",
"'Mzml file already parsed.'",
")",
"[",
"None",
"for",
"_",
"in",
"self",
"... | 36.875 | 16.125 |
def setup(applicationName,
applicationType=None,
style='plastique',
splash='',
splashType=None,
splashTextColor='white',
splashTextAlign=None,
theme=''):
"""
Wrapper system for the QApplication creation process to handle all proper
pre-application setup. This method will verify that there is no application
running, creating one if necessary. If no application is created, a None
value is returned - signaling that there is already an app running. If you
need to specify your own QApplication subclass, you can do so through the
applicationType parameter.
:note This method should always be used with the exec_ method to
handle the post setup process.
:param applicationName | <str>
applicationType | <subclass of QApplication> || None
style | <str> || <QStyle> | style to use for the new app
splash | <str> | filepath to use for a splash screen
splashType | <subclass of QSplashScreen> || None
splashTextColor | <str> || <QColor>
splashTextAlign | <Qt.Alignment>
:usage |import projexui
|
|def main(argv):
| # initialize the application
| data = projexui.setup()
|
| # do some initialization code
| window = MyWindow()
| window.show()
|
| # execute the application
| projexui.exec_(window, data)
:return { <str> key: <variant> value, .. }
"""
import_qt(globals())
output = {}
# check to see if there is a qapplication running
if not QtGui.QApplication.instance():
# make sure we have a valid QApplication type
if applicationType is None:
applicationType = QtGui.QApplication
app = applicationType([applicationName])
app.setApplicationName(applicationName)
app.setQuitOnLastWindowClosed(True)
stylize(app, style=style, theme=theme)
# utilized with the projexui.config.xschemeconfig
app.setProperty('useScheme', wrapVariant(True))
output['app'] = app
# create a new splash screen if desired
if splash:
if not splashType:
splashType = XLoggerSplashScreen
pixmap = QtGui.QPixmap(splash)
screen = splashType(pixmap)
if splashTextAlign is None:
splashTextAlign = QtCore.Qt.AlignLeft | QtCore.Qt.AlignBottom
screen.setTextColor(QtGui.QColor(splashTextColor))
screen.setTextAlignment(splashTextAlign)
screen.show()
QtGui.QApplication.instance().processEvents()
output['splash'] = screen
return output | [
"def",
"setup",
"(",
"applicationName",
",",
"applicationType",
"=",
"None",
",",
"style",
"=",
"'plastique'",
",",
"splash",
"=",
"''",
",",
"splashType",
"=",
"None",
",",
"splashTextColor",
"=",
"'white'",
",",
"splashTextAlign",
"=",
"None",
",",
"theme"... | 36.292683 | 18.097561 |
def new(self, log_block_size):
# type: (int) -> None
'''
Create a new Version Volume Descriptor.
Parameters:
log_block_size - The size of one extent.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('This Version Volume Descriptor is already initialized')
self._data = b'\x00' * log_block_size
self._initialized = True | [
"def",
"new",
"(",
"self",
",",
"log_block_size",
")",
":",
"# type: (int) -> None",
"if",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'This Version Volume Descriptor is already initialized'",
")",
"self",
".",
"_dat... | 29.8 | 22.6 |
def mod(self, other, axis="columns", level=None, fill_value=None):
"""Mods this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the mod against this.
axis: The axis to mod over.
level: The Multilevel index level to apply mod over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Mod applied.
"""
return self._binary_op(
"mod", other, axis=axis, level=level, fill_value=fill_value
) | [
"def",
"mod",
"(",
"self",
",",
"other",
",",
"axis",
"=",
"\"columns\"",
",",
"level",
"=",
"None",
",",
"fill_value",
"=",
"None",
")",
":",
"return",
"self",
".",
"_binary_op",
"(",
"\"mod\"",
",",
"other",
",",
"axis",
"=",
"axis",
",",
"level",
... | 38.2 | 20.066667 |
def gpdfitnew(x, sort=True, sort_in_place=False, return_quadrature=False):
"""Estimate the paramaters for the Generalized Pareto Distribution (GPD)
Returns empirical Bayes estimate for the parameters of the two-parameter
generalized Parato distribution given the data.
Parameters
----------
x : ndarray
One dimensional data array
sort : bool or ndarray, optional
If known in advance, one can provide an array of indices that would
sort the input array `x`. If the input array is already sorted, provide
False. If True (default behaviour), the array is sorted internally.
sort_in_place : bool, optional
If `sort` is True and `sort_in_place` is True, the array is sorted
in-place (False by default).
return_quadrature : bool, optional
If True, quadrature points and weight `ks` and `w` of the marginal posterior distribution of k are also calculated and returned. False by
default.
Returns
-------
k, sigma : float
estimated parameter values
ks, w : ndarray
Quadrature points and weights of the marginal posterior distribution
of `k`. Returned only if `return_quadrature` is True.
Notes
-----
This function returns a negative of Zhang and Stephens's k, because it is
more common parameterisation.
"""
if x.ndim != 1 or len(x) <= 1:
raise ValueError("Invalid input array.")
# check if x should be sorted
if sort is True:
if sort_in_place:
x.sort()
xsorted = True
else:
sort = np.argsort(x)
xsorted = False
elif sort is False:
xsorted = True
else:
xsorted = False
n = len(x)
PRIOR = 3
m = 30 + int(np.sqrt(n))
bs = np.arange(1, m + 1, dtype=float)
bs -= 0.5
np.divide(m, bs, out=bs)
np.sqrt(bs, out=bs)
np.subtract(1, bs, out=bs)
if xsorted:
bs /= PRIOR * x[int(n/4 + 0.5) - 1]
bs += 1 / x[-1]
else:
bs /= PRIOR * x[sort[int(n/4 + 0.5) - 1]]
bs += 1 / x[sort[-1]]
ks = np.negative(bs)
temp = ks[:,None] * x
np.log1p(temp, out=temp)
np.mean(temp, axis=1, out=ks)
L = bs / ks
np.negative(L, out=L)
np.log(L, out=L)
L -= ks
L -= 1
L *= n
temp = L - L[:,None]
np.exp(temp, out=temp)
w = np.sum(temp, axis=1)
np.divide(1, w, out=w)
# remove negligible weights
dii = w >= 10 * np.finfo(float).eps
if not np.all(dii):
w = w[dii]
bs = bs[dii]
# normalise w
w /= w.sum()
# posterior mean for b
b = np.sum(bs * w)
# Estimate for k, note that we return a negative of Zhang and
# Stephens's k, because it is more common parameterisation.
temp = (-b) * x # pylint: disable=invalid-unary-operand-type
np.log1p(temp, out=temp)
k = np.mean(temp)
if return_quadrature:
np.negative(x, out=temp)
temp = bs[:, None] * temp
np.log1p(temp, out=temp)
ks = np.mean(temp, axis=1)
# estimate for sigma
sigma = -k / b * n / (n - 0)
# weakly informative prior for k
a = 10
k = k * n / (n+a) + a * 0.5 / (n+a)
if return_quadrature:
ks *= n / (n+a)
ks += a * 0.5 / (n+a)
if return_quadrature:
return k, sigma, ks, w
else:
return k, sigma | [
"def",
"gpdfitnew",
"(",
"x",
",",
"sort",
"=",
"True",
",",
"sort_in_place",
"=",
"False",
",",
"return_quadrature",
"=",
"False",
")",
":",
"if",
"x",
".",
"ndim",
"!=",
"1",
"or",
"len",
"(",
"x",
")",
"<=",
"1",
":",
"raise",
"ValueError",
"(",... | 26.950413 | 21.603306 |
def getLogs(self, CorpNum, ItemCode, MgtKey):
""" 전자명세서 문서이력 목록 확인
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
return
문서이력 정보 목록 as List
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
return self._httpget('/Statement/' + str(ItemCode) + '/' + MgtKey + '/Logs', CorpNum) | [
"def",
"getLogs",
"(",
"self",
",",
"CorpNum",
",",
"ItemCode",
",",
"MgtKey",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"if",
"ItemCod... | 40.052632 | 15.473684 |
def update_user(self, user, email=None, username=None,
uid=None, defaultRegion=None, enabled=None):
"""
Allows you to update settings for a given user.
"""
user_id = utils.get_id(user)
uri = "users/%s" % user_id
upd = {"id": user_id}
if email is not None:
upd["email"] = email
if defaultRegion is not None:
upd["RAX-AUTH:defaultRegion"] = defaultRegion
if username is not None:
upd["username"] = username
if enabled is not None:
upd["enabled"] = enabled
data = {"user": upd}
resp, resp_body = self.method_put(uri, data=data)
if resp.status_code in (401, 403, 404):
raise exc.AuthorizationFailure("You are not authorized to update "
"users.")
return User(self, resp_body) | [
"def",
"update_user",
"(",
"self",
",",
"user",
",",
"email",
"=",
"None",
",",
"username",
"=",
"None",
",",
"uid",
"=",
"None",
",",
"defaultRegion",
"=",
"None",
",",
"enabled",
"=",
"None",
")",
":",
"user_id",
"=",
"utils",
".",
"get_id",
"(",
... | 38.772727 | 9.863636 |
def _handle_tag_space(self, data, text):
"""Handle whitespace (*text*) inside of an HTML open tag."""
ctx = data.context
end_of_value = ctx & data.CX_ATTR_VALUE and not ctx & (data.CX_QUOTED | data.CX_NOTE_QUOTE)
if end_of_value or (ctx & data.CX_QUOTED and ctx & data.CX_NOTE_SPACE):
self._push_tag_buffer(data)
data.context = data.CX_ATTR_READY
elif ctx & data.CX_NOTE_SPACE:
data.context = data.CX_ATTR_READY
elif ctx & data.CX_ATTR_NAME:
data.context |= data.CX_NOTE_EQUALS
data.padding_buffer["before_eq"] += text
if ctx & data.CX_QUOTED and not ctx & data.CX_NOTE_SPACE:
self._emit_text(text)
elif data.context & data.CX_ATTR_READY:
data.padding_buffer["first"] += text
elif data.context & data.CX_ATTR_VALUE:
data.padding_buffer["after_eq"] += text | [
"def",
"_handle_tag_space",
"(",
"self",
",",
"data",
",",
"text",
")",
":",
"ctx",
"=",
"data",
".",
"context",
"end_of_value",
"=",
"ctx",
"&",
"data",
".",
"CX_ATTR_VALUE",
"and",
"not",
"ctx",
"&",
"(",
"data",
".",
"CX_QUOTED",
"|",
"data",
".",
... | 50.333333 | 11.777778 |
def getclosurevars(func):
"""
Get the mapping of free variables to their current values.
Returns a named tuple of dicts mapping the current nonlocal, global
and builtin references as seen by the body of the function. A final
set of unbound names that could not be resolved is also provided.
Note:
Modified function from the Python 3.5 inspect standard library module
Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software Foundation; All Rights
Reserved"
See also py-cloud-compute-cannon/NOTICES.
"""
if inspect.ismethod(func):
func = func.__func__
elif not inspect.isroutine(func):
raise TypeError("'{!r}' is not a Python function".format(func))
# AMVMOD: deal with python 2 builtins that don't define these
code = getattr(func, '__code__', None)
closure = getattr(func, '__closure__', None)
co_names = getattr(code, 'co_names', ())
glb = getattr(func, '__globals__', {})
# Nonlocal references are named in co_freevars and resolved
# by looking them up in __closure__ by positional index
if closure is None:
nonlocal_vars = {}
else:
nonlocal_vars = {var: cell.cell_contents
for var, cell in zip(code.co_freevars, func.__closure__)}
# Global and builtin references are named in co_names and resolved
# by looking them up in __globals__ or __builtins__
global_ns = glb
builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
if inspect.ismodule(builtin_ns):
builtin_ns = builtin_ns.__dict__
global_vars = {}
builtin_vars = {}
unbound_names = set()
for name in co_names:
if name in ("None", "True", "False"):
# Because these used to be builtins instead of keywords, they
# may still show up as name references. We ignore them.
continue
try:
global_vars[name] = global_ns[name]
except KeyError:
try:
builtin_vars[name] = builtin_ns[name]
except KeyError:
unbound_names.add(name)
return {'nonlocal': nonlocal_vars,
'global': global_vars,
'builtin': builtin_vars,
'unbound': unbound_names} | [
"def",
"getclosurevars",
"(",
"func",
")",
":",
"if",
"inspect",
".",
"ismethod",
"(",
"func",
")",
":",
"func",
"=",
"func",
".",
"__func__",
"elif",
"not",
"inspect",
".",
"isroutine",
"(",
"func",
")",
":",
"raise",
"TypeError",
"(",
"\"'{!r}' is not ... | 36.412698 | 20.507937 |
def create_policy(policyName, policyDocument,
region=None, key=None, keyid=None, profile=None):
'''
Given a valid config, create a policy.
Returns {created: true} if the policy was created and returns
{created: False} if the policy was not created.
CLI Example:
.. code-block:: bash
salt myminion boto_iot.create_policy my_policy \\
'{"Version":"2015-12-12",\\
"Statement":[{"Effect":"Allow",\\
"Action":["iot:Publish"],\\
"Resource":["arn:::::topic/foo/bar"]}]}'
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not isinstance(policyDocument, string_types):
policyDocument = salt.utils.json.dumps(policyDocument)
policy = conn.create_policy(policyName=policyName,
policyDocument=policyDocument)
if policy:
log.info('The newly created policy version is %s', policy['policyVersionId'])
return {'created': True, 'versionId': policy['policyVersionId']}
else:
log.warning('Policy was not created')
return {'created': False}
except ClientError as e:
return {'created': False, 'error': __utils__['boto3.get_error'](e)} | [
"def",
"create_policy",
"(",
"policyName",
",",
"policyDocument",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"try",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",... | 37.228571 | 24.771429 |
def websocket_query(self, path, params={}):
"""
Open a websocket connection
:param path: Endpoint in API
:param params: Parameters added as a query arg
:returns: Websocket
"""
url = "http://docker/v" + self._api_version + "/" + path
connection = yield from self._session.ws_connect(url,
origin="http://docker",
autoping=True)
return connection | [
"def",
"websocket_query",
"(",
"self",
",",
"path",
",",
"params",
"=",
"{",
"}",
")",
":",
"url",
"=",
"\"http://docker/v\"",
"+",
"self",
".",
"_api_version",
"+",
"\"/\"",
"+",
"path",
"connection",
"=",
"yield",
"from",
"self",
".",
"_session",
".",
... | 37 | 17.857143 |
def add(self, data, name=None):
''' Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
'''
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.data[name] = data
return name | [
"def",
"add",
"(",
"self",
",",
"data",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"n",
"=",
"len",
"(",
"self",
".",
"data",
")",
"while",
"\"Series %d\"",
"%",
"n",
"in",
"self",
".",
"data",
":",
"n",
"+=",
"1",
"... | 28.368421 | 19.315789 |
def write(self, data):
"""! @brief Write bytes into the connection."""
# If nobody is connected, act like all data was written anyway.
if self.connected is None:
return 0
data = to_bytes_safe(data)
size = len(data)
remaining = size
while remaining:
count = self._abstract_socket.write(data)
remaining -= count
if remaining:
data = data[count:]
return size | [
"def",
"write",
"(",
"self",
",",
"data",
")",
":",
"# If nobody is connected, act like all data was written anyway.",
"if",
"self",
".",
"connected",
"is",
"None",
":",
"return",
"0",
"data",
"=",
"to_bytes_safe",
"(",
"data",
")",
"size",
"=",
"len",
"(",
"d... | 33.571429 | 13.785714 |
async def AddToUnit(self, storages):
'''
storages : typing.Sequence[~StorageAddParams]
Returns -> typing.Sequence[~AddStorageResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Storage',
request='AddToUnit',
version=4,
params=_params)
_params['storages'] = storages
reply = await self.rpc(msg)
return reply | [
"async",
"def",
"AddToUnit",
"(",
"self",
",",
"storages",
")",
":",
"# map input types to rpc msg",
"_params",
"=",
"dict",
"(",
")",
"msg",
"=",
"dict",
"(",
"type",
"=",
"'Storage'",
",",
"request",
"=",
"'AddToUnit'",
",",
"version",
"=",
"4",
",",
"... | 32.357143 | 11.357143 |
def path(self):
"""
Build the path (prefix) leading up to this namespace.
"""
return "/".join([
part
for part in [
self.version,
self.qualifier,
]
if part
]) | [
"def",
"path",
"(",
"self",
")",
":",
"return",
"\"/\"",
".",
"join",
"(",
"[",
"part",
"for",
"part",
"in",
"[",
"self",
".",
"version",
",",
"self",
".",
"qualifier",
",",
"]",
"if",
"part",
"]",
")"
] | 20.461538 | 18.307692 |
def stderr_redirected(to=os.devnull):
"""
import os
with stderr_redirected(to=filename):
print("from Python")
os.system("echo non-Python applications are also supported")
"""
fd = sys.stderr.fileno()
# assert that Python and C stdio write using the same file descriptor
# assert libc.fileno(ctypes.c_void_p.in_dll(libc, "stderr")) == fd == 1
def _redirect_stderr(to):
sys.stderr.close() # + implicit flush()
os.dup2(to.fileno(), fd) # fd writes to 'to' file
sys.stderr = os.fdopen(fd, 'w') # Python writes to fd
with os.fdopen(os.dup(fd), 'w') as old_stderr:
with open(to, 'w') as file:
_redirect_stderr(to=file)
try:
yield # allow code to be run with the redirected stderr
finally:
_redirect_stderr(to=old_stderr) | [
"def",
"stderr_redirected",
"(",
"to",
"=",
"os",
".",
"devnull",
")",
":",
"fd",
"=",
"sys",
".",
"stderr",
".",
"fileno",
"(",
")",
"# assert that Python and C stdio write using the same file descriptor",
"# assert libc.fileno(ctypes.c_void_p.in_dll(libc, \"stderr\")) == fd... | 33.68 | 18.48 |
def build_instruction_coverage_plugin() -> LaserPlugin:
""" Creates an instance of the instruction coverage plugin"""
from mythril.laser.ethereum.plugins.implementations.coverage import (
InstructionCoveragePlugin,
)
return InstructionCoveragePlugin() | [
"def",
"build_instruction_coverage_plugin",
"(",
")",
"->",
"LaserPlugin",
":",
"from",
"mythril",
".",
"laser",
".",
"ethereum",
".",
"plugins",
".",
"implementations",
".",
"coverage",
"import",
"(",
"InstructionCoveragePlugin",
",",
")",
"return",
"InstructionCov... | 41.428571 | 18.142857 |
def setValue(self, key, value, channel=1):
"""
Some devices allow to directly set values to perform a specific task.
"""
if channel in self.CHANNELS:
return self.CHANNELS[channel].setValue(key, value)
LOG.error("HMDevice.setValue: channel not found %i!" % channel) | [
"def",
"setValue",
"(",
"self",
",",
"key",
",",
"value",
",",
"channel",
"=",
"1",
")",
":",
"if",
"channel",
"in",
"self",
".",
"CHANNELS",
":",
"return",
"self",
".",
"CHANNELS",
"[",
"channel",
"]",
".",
"setValue",
"(",
"key",
",",
"value",
")... | 38.75 | 17 |
def _split_section_and_key(key):
"""Return a tuple with config section and key."""
parts = key.split('.')
if len(parts) > 1:
return 'renku "{0}"'.format(parts[0]), '.'.join(parts[1:])
return 'renku', key | [
"def",
"_split_section_and_key",
"(",
"key",
")",
":",
"parts",
"=",
"key",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"parts",
")",
">",
"1",
":",
"return",
"'renku \"{0}\"'",
".",
"format",
"(",
"parts",
"[",
"0",
"]",
")",
",",
"'.'",
".",... | 37 | 13.833333 |
def execute(self, **kwargs):
"""
Execute the interactive guessing procedure.
:param show: Whether or not to show the figure. Useful for testing.
:type show: bool
:param block: Blocking call to matplotlib
:type show: bool
Any additional keyword arguments are passed to
matplotlib.pyplot.show().
"""
show = kwargs.pop('show')
if show:
# self.fig.show() # Apparently this does something else,
# see https://github.com/matplotlib/matplotlib/issues/6138
plt.show(**kwargs) | [
"def",
"execute",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"show",
"=",
"kwargs",
".",
"pop",
"(",
"'show'",
")",
"if",
"show",
":",
"# self.fig.show() # Apparently this does something else,",
"# see https://github.com/matplotlib/matplotlib/issues/6138",
"plt",
... | 34 | 17.647059 |
def _slice2rows(self, start, stop, step=None):
"""
Convert a slice to an explicit array of rows
"""
nrows = self._info['nrows']
if start is None:
start = 0
if stop is None:
stop = nrows
if step is None:
step = 1
tstart = self._fix_range(start)
tstop = self._fix_range(stop)
if tstart == 0 and tstop == nrows:
# this is faster: if all fields are also requested, then a
# single fread will be done
return None
if stop < start:
raise ValueError("start is greater than stop in slice")
return numpy.arange(tstart, tstop, step, dtype='i8') | [
"def",
"_slice2rows",
"(",
"self",
",",
"start",
",",
"stop",
",",
"step",
"=",
"None",
")",
":",
"nrows",
"=",
"self",
".",
"_info",
"[",
"'nrows'",
"]",
"if",
"start",
"is",
"None",
":",
"start",
"=",
"0",
"if",
"stop",
"is",
"None",
":",
"stop... | 33.047619 | 13.428571 |
def normalized_axes_tuple(axes, ndim):
"""Return a tuple of ``axes`` converted to positive integers.
This function turns negative entries into equivalent positive
ones according to standard Python indexing "from the right".
Parameters
----------
axes : int or sequence of ints
Single integer or integer sequence of arbitrary length.
Duplicate entries are not allowed. All entries must fulfill
``-ndim <= axis <= ndim - 1``.
ndim : positive int
Number of available axes determining the valid axis range.
Returns
-------
axes_list : tuple of ints
The converted tuple of axes.
Examples
--------
Normalizing a sequence of axes:
>>> normalized_axes_tuple([0, -1, 2], ndim=3)
(0, 2, 2)
Single integer works, too:
>>> normalized_axes_tuple(-3, ndim=3)
(0,)
"""
try:
axes, axes_in = (int(axes),), axes
except TypeError:
axes, axes_in = tuple(int(axis) for axis in axes), axes
if any(axis != axis_in for axis, axis_in in zip(axes, axes_in)):
raise ValueError('`axes` may only contain integers, got {}'
''.format(axes_in))
else:
if axes[0] != axes_in:
raise TypeError('`axes` must be integer or sequence, got {}'
''.format(axes_in))
if len(set(axes)) != len(axes):
raise ValueError('`axes` may not contain duplicate entries')
ndim, ndim_in = int(ndim), ndim
if ndim <= 0:
raise ValueError('`ndim` must be positive, got {}'.format(ndim_in))
axes_arr = np.array(axes)
axes_arr[axes_arr < 0] += ndim
if np.any((axes_arr < 0) | (axes_arr >= ndim)):
raise ValueError('all `axes` entries must satisfy -{0} <= axis < {0}, '
'got {1}'.format(ndim, axes_in))
return tuple(axes_arr) | [
"def",
"normalized_axes_tuple",
"(",
"axes",
",",
"ndim",
")",
":",
"try",
":",
"axes",
",",
"axes_in",
"=",
"(",
"int",
"(",
"axes",
")",
",",
")",
",",
"axes",
"except",
"TypeError",
":",
"axes",
",",
"axes_in",
"=",
"tuple",
"(",
"int",
"(",
"ax... | 31.62069 | 22.034483 |
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
def timeout_handler(*args):
log.warning('Unable to forward pub data: %s', args[1])
return True
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self.local.pub_async(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
io_loop=self.io_loop,
callback=lambda _: None,
**kwargs) | [
"def",
"syndic_cmd",
"(",
"self",
",",
"data",
")",
":",
"# Set up default tgt_type",
"if",
"'tgt_type'",
"not",
"in",
"data",
":",
"data",
"[",
"'tgt_type'",
"]",
"=",
"'glob'",
"kwargs",
"=",
"{",
"}",
"# optionally add a few fields to the publish data",
"for",
... | 38.225806 | 16.096774 |
def ParseFileLNKFile(
self, parser_mediator, file_object, display_name):
"""Parses a Windows Shortcut (LNK) file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
display_name (str): display name.
"""
lnk_file = pylnk.file()
lnk_file.set_ascii_codepage(parser_mediator.codepage)
try:
lnk_file.open_file_object(file_object)
except IOError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to open file with error: {0!s}'.format(exception))
return
link_target = None
if lnk_file.link_target_identifier_data:
# TODO: change file_entry.name to display name once it is generated
# correctly.
display_name = parser_mediator.GetFilename()
shell_items_parser = shell_items.ShellItemsParser(display_name)
shell_items_parser.ParseByteStream(
parser_mediator, lnk_file.link_target_identifier_data,
codepage=parser_mediator.codepage)
link_target = shell_items_parser.CopyToPath()
event_data = WinLnkLinkEventData()
event_data.birth_droid_file_identifier = (
lnk_file.birth_droid_file_identifier)
event_data.birth_droid_volume_identifier = (
lnk_file.birth_droid_volume_identifier)
event_data.command_line_arguments = lnk_file.command_line_arguments
event_data.description = lnk_file.description
event_data.drive_serial_number = lnk_file.drive_serial_number
event_data.drive_type = lnk_file.drive_type
event_data.droid_file_identifier = lnk_file.droid_file_identifier
event_data.droid_volume_identifier = lnk_file.droid_volume_identifier
event_data.env_var_location = lnk_file.environment_variables_location
event_data.file_attribute_flags = lnk_file.file_attribute_flags
event_data.file_size = lnk_file.file_size
event_data.icon_location = lnk_file.icon_location
event_data.link_target = link_target
event_data.local_path = lnk_file.local_path
event_data.network_path = lnk_file.network_path
event_data.relative_path = lnk_file.relative_path
event_data.volume_label = lnk_file.volume_label
event_data.working_directory = lnk_file.working_directory
access_time = lnk_file.get_file_access_time_as_integer()
if access_time != 0:
date_time = dfdatetime_filetime.Filetime(timestamp=access_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data)
creation_time = lnk_file.get_file_creation_time_as_integer()
if creation_time != 0:
date_time = dfdatetime_filetime.Filetime(timestamp=creation_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
modification_time = lnk_file.get_file_modification_time_as_integer()
if modification_time != 0:
date_time = dfdatetime_filetime.Filetime(timestamp=modification_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
if access_time == 0 and creation_time == 0 and modification_time == 0:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data)
if lnk_file.droid_file_identifier:
try:
self._ParseDistributedTrackingIdentifier(
parser_mediator, lnk_file.droid_file_identifier, display_name)
except (TypeError, ValueError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to read droid file identifier with error: {0!s}.'.format(
exception))
if lnk_file.birth_droid_file_identifier:
try:
self._ParseDistributedTrackingIdentifier(
parser_mediator, lnk_file.birth_droid_file_identifier, display_name)
except (TypeError, ValueError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read birth droid file identifier with error: '
'{0!s}.').format(exception))
lnk_file.close() | [
"def",
"ParseFileLNKFile",
"(",
"self",
",",
"parser_mediator",
",",
"file_object",
",",
"display_name",
")",
":",
"lnk_file",
"=",
"pylnk",
".",
"file",
"(",
")",
"lnk_file",
".",
"set_ascii_codepage",
"(",
"parser_mediator",
".",
"codepage",
")",
"try",
":",... | 43.94 | 19.89 |
def plot_border(mask, should_plot_border, units, kpc_per_arcsec, pointsize, zoom_offset_pixels):
    """Plot the borders of the mask or the array on the figure.

    Parameters
    -----------
    mask : ndarray of data.array.mask.Mask
        The mask applied to the array, the edge of which is plotted as a set of points over the plotted array.
    should_plot_border : bool
        If a mask is supplied, its borders pixels (e.g. the exterior edge) is plotted if this is *True*.
    units : str
        The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').
    kpc_per_arcsec : float or None
        The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.
    pointsize : int
        The size of the points plotted to show the borders.
    zoom_offset_pixels : ndarray or None
        If supplied, this offset is subtracted from the border pixel coordinates before the unit
        conversion (used when the plotted array is zoomed).
    """
    if should_plot_border and mask is not None:
        plt.gca()
        # Map border pixel indexes to 2D pixel coordinates of the masked grid.
        border_pixels = mask.masked_grid_index_to_pixel[mask.border_pixels]
        if zoom_offset_pixels is not None:
            border_pixels -= zoom_offset_pixels
        # Convert pixel coordinates to arc-seconds, then to the requested plot units.
        border_arcsec = mask.grid_pixels_to_grid_arcsec(grid_pixels=border_pixels)
        border_units = convert_grid_units(array=mask, grid_arcsec=border_arcsec, units=units,
                                          kpc_per_arcsec=kpc_per_arcsec)
        plt.scatter(y=border_units[:,0], x=border_units[:,1], s=pointsize, c='y')
"def",
"plot_border",
"(",
"mask",
",",
"should_plot_border",
",",
"units",
",",
"kpc_per_arcsec",
",",
"pointsize",
",",
"zoom_offset_pixels",
")",
":",
"if",
"should_plot_border",
"and",
"mask",
"is",
"not",
"None",
":",
"plt",
".",
"gca",
"(",
")",
"borde... | 47.310345 | 30.275862 |
def all(self):
    """
    Return the cached list of classes (or instances, when
    ``self.instances`` is true) named by ``get_class_list``.

    Classes are imported lazily on first call; import failures are
    logged and skipped. The result is cached on ``self.cache``.
    """
    paths = list(self.get_class_list())
    if not paths:
        self.cache = []
        return []
    if self.cache is not None:
        return self.cache
    loaded = []
    for dotted_path in paths:
        try:
            mod_path, cls_name = dotted_path.rsplit('.', 1)
            cls = getattr(__import__(mod_path, {}, {}, cls_name), cls_name)
            loaded.append(cls() if self.instances else cls)
        except Exception:
            logger.exception('Unable to import {cls}'.format(cls=dotted_path))
            continue
    self.cache = loaded
    return loaded
"def",
"all",
"(",
"self",
")",
":",
"class_list",
"=",
"list",
"(",
"self",
".",
"get_class_list",
"(",
")",
")",
"if",
"not",
"class_list",
":",
"self",
".",
"cache",
"=",
"[",
"]",
"return",
"[",
"]",
"if",
"self",
".",
"cache",
"is",
"not",
"... | 29.642857 | 16.071429 |
def from_array(array):
    """
    Build a new UserProfilePhotos instance from its dict representation.

    :param array: the dict to deserialize; None or empty yields None.
    :return: new UserProfilePhotos instance, or None when there is no data.
    :rtype: UserProfilePhotos
    """
    if not array:
        return None
    assert_type_or_raise(array, dict, parameter_name="array")
    from pytgbot.api_types.receivable.media import PhotoSize
    kwargs = {
        'total_count': int(array.get('total_count')),
        'photos': PhotoSize.from_array_list(array.get('photos'), list_level=2),
        # keep the raw payload around for debugging / round-tripping
        '_raw': array,
    }
    return UserProfilePhotos(**kwargs)
"def",
"from_array",
"(",
"array",
")",
":",
"if",
"array",
"is",
"None",
"or",
"not",
"array",
":",
"return",
"None",
"# end if",
"assert_type_or_raise",
"(",
"array",
",",
"dict",
",",
"parameter_name",
"=",
"\"array\"",
")",
"from",
"pytgbot",
".",
"api... | 33.473684 | 19.157895 |
def fix_logging_path(config, main_section):
    """
    Return the configured ``log.file`` path with environment variables and
    user home (~) expanded, converted to a relative path when absolute.

    :param config: configuration object exposing ``get(section, option)``
    :param main_section: section name holding the ``log.file`` option
    :return: the normalized relative path, or the original falsy value
        when no log file is configured
    """
    raw_path = config.get(main_section, 'log.file')
    if not raw_path:
        return raw_path
    expanded = os.path.expanduser(os.path.expandvars(raw_path))
    if os.path.isabs(expanded):
        return os.path.relpath(expanded)
    return expanded
"def",
"fix_logging_path",
"(",
"config",
",",
"main_section",
")",
":",
"log_file",
"=",
"config",
".",
"get",
"(",
"main_section",
",",
"'log.file'",
")",
"if",
"log_file",
":",
"log_file",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"os",
".",
"pa... | 35.545455 | 14.090909 |
def ffmpeg_works():
    """Probe whether GIF encoding via ffmpeg works in this environment.

    Encodes a tiny all-black two-frame clip; an I/O failure (e.g. the
    ffmpeg binary missing) means "not working".
    """
    probe_frames = np.zeros((2, 32, 32, 3), dtype=np.uint8)
    try:
        _encode_gif(probe_frames, 2)
    except (IOError, OSError):
        return False
    return True
"def",
"ffmpeg_works",
"(",
")",
":",
"images",
"=",
"np",
".",
"zeros",
"(",
"(",
"2",
",",
"32",
",",
"32",
",",
"3",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"try",
":",
"_encode_gif",
"(",
"images",
",",
"2",
")",
"return",
"True",
... | 28.125 | 17.625 |
def _initialise_classifier(self, comparison_vectors):
    """Seed the clustering kernel with fixed, deterministic centers.

    One center per feature near 0 (0.05, the non-match cluster) and one
    near 1 (0.95, the match cluster).

    :param comparison_vectors: iterable of comparison feature names/columns
        (e.g. a DataFrame, whose iteration yields its columns).
    """
    # Materialize once: calling len(list(...)) twice (as before) consumed
    # one-shot iterables on the first pass, yielding a ragged init array.
    n_features = len(list(comparison_vectors))
    # Fixed start points instead of random initialisation.
    self.kernel.init = numpy.array(
        [[0.05] * n_features,
         [0.95] * n_features])
"def",
"_initialise_classifier",
"(",
"self",
",",
"comparison_vectors",
")",
":",
"# Set the start point of the classifier.",
"self",
".",
"kernel",
".",
"init",
"=",
"numpy",
".",
"array",
"(",
"[",
"[",
"0.05",
"]",
"*",
"len",
"(",
"list",
"(",
"comparison... | 41.571429 | 12.428571 |
def _validate_charset(data, charset):
    """Validate a manual charset specification against the data.

    :param data: the data string the charset applies to.
    :param charset: None (no explicit charset), a single charset letter
        ('A', 'B' or 'C'), or a per-symbol sequence of charset letters.
    :raises Code128.CharsetError: if any charset letter is not 'A', 'B'
        or 'C', or the specification is empty.
    :raises Code128.CharsetLengthError: if a per-symbol charset does not
        cover the data exactly (each 'C' symbol encodes two characters).
    """
    # None means "no explicit charset" and is always valid; checking it
    # first also avoids the TypeError len(None) raised previously.
    if charset is None:
        return
    if len(charset) > 1:
        charset_data_length = 0
        for symbol_charset in charset:
            if symbol_charset not in ('A', 'B', 'C'):
                raise Code128.CharsetError
            # Charset 'C' packs two data characters into one symbol.
            # (== instead of the previous "is": identity comparison with a
            # str literal is interning-dependent and warned about by CPython.)
            charset_data_length += 2 if symbol_charset == 'C' else 1
        if charset_data_length != len(data):
            raise Code128.CharsetLengthError
    elif len(charset) == 1:
        if charset not in ('A', 'B', 'C'):
            raise Code128.CharsetError
    else:
        # Empty (but non-None) specification is invalid.
        raise Code128.CharsetError
"def",
"_validate_charset",
"(",
"data",
",",
"charset",
")",
":",
"if",
"len",
"(",
"charset",
")",
">",
"1",
":",
"charset_data_length",
"=",
"0",
"for",
"symbol_charset",
"in",
"charset",
":",
"if",
"symbol_charset",
"not",
"in",
"(",
"'A'",
",",
"'B'... | 45.733333 | 7.933333 |
def create(cls, parent=None, **kwargs):
    """Create a new remote object under *parent* and return its wrapper.

    :param parent: the parent API resource (required); supplies the auth
        key, base route and configuration.
    :param kwargs: fields POSTed as the new object's payload.
    :return: the created resource wrapper, or the value produced by
        ``_handle_request_exception`` on a non-success status code.
    :raises Exception: if *parent* is not given.
    """
    if parent is None:
        raise Exception("Parent class is required")
    # Extend the parent's route with a placeholder id segment.
    new_route = copy(parent.route)
    if cls.ID_NAME is not None:
        new_route[cls.ID_NAME] = ""
    resource = cls(key=parent.key, route=new_route, config=parent.config)
    started_at = datetime.now()
    response = requests.post(resource._url(), auth=(resource.key, ""), data=kwargs)
    # Respect the API's rate limits, measured from when the request started.
    cls._delay_for_ratelimits(started_at)
    if response.status_code not in cls.TRUTHY_CODES:
        return cls._handle_request_exception(response)
    # No envelope on post requests
    payload = response.json()
    resource.route[resource.ID_NAME] = payload.get("id", payload.get(resource.ID_NAME))
    resource.data = payload
    return resource
"def",
"create",
"(",
"cls",
",",
"parent",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"parent",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Parent class is required\"",
")",
"route",
"=",
"copy",
"(",
"parent",
".",
"route",
")",
"if",... | 31.04 | 20.64 |
def lstm_unroll(num_lstm_layer, seq_len, num_hidden, num_label, loss_type=None):
    """
    Creates an unrolled LSTM symbol: for inference when loss_type is not
    specified, and for training when it is. loss_type must be one of
    'ctc' or 'warpctc'.

    Parameters
    ----------
    num_lstm_layer: int
    seq_len: int
    num_hidden: int
    num_label: int
    loss_type: str
        'ctc' or 'warpctc'

    Returns
    -------
    mxnet.symbol.symbol.Symbol
    """
    # Shared base network between training and inference.
    pred = _lstm_unroll_base(num_lstm_layer, seq_len, num_hidden)
    if not loss_type:
        # Inference mode: plain softmax over the predictions.
        return mx.sym.softmax(data=pred, name='softmax')
    # Training mode: attach the requested CTC loss.
    return _add_ctc_loss(pred, seq_len, num_label, loss_type)
"def",
"lstm_unroll",
"(",
"num_lstm_layer",
",",
"seq_len",
",",
"num_hidden",
",",
"num_label",
",",
"loss_type",
"=",
"None",
")",
":",
"# Create the base (shared between training and inference) and add loss to the end",
"pred",
"=",
"_lstm_unroll_base",
"(",
"num_lstm_l... | 30.814815 | 24.666667 |
def values(self):
    """
    Return the linked values as a flat list.

    The structure is a chain of nodes, each a mapping with a ``FIRST``
    value and an optional ``REST`` tail; traversal stops at a missing
    node or a missing ``FIRST``.
    """
    collected = []
    node = self
    while node is not None and node.get('FIRST') is not None:
        collected.append(node['FIRST'])
        node = node.get('REST')
    return collected
"def",
"values",
"(",
"self",
")",
":",
"def",
"collect",
"(",
"d",
")",
":",
"if",
"d",
"is",
"None",
"or",
"d",
".",
"get",
"(",
"'FIRST'",
")",
"is",
"None",
":",
"return",
"[",
"]",
"vals",
"=",
"[",
"d",
"[",
"'FIRST'",
"]",
"]",
"vals",... | 27.363636 | 10.636364 |
def parent_folder(self):
    # type: () -> Folder
    """The :class:`Folder <pyOutlook.core.folder.Folder>` this message is in.

    Fetched lazily from the account on first access and cached for
    subsequent lookups.

    >>> account = OutlookAccount('')
    >>> message = account.get_messages()[0]
    >>> message.parent_folder
    Inbox
    >>> message.parent_folder.unread_count
    19

    Returns: :class:`Folder <pyOutlook.core.folder.Folder>`
    """
    if self.__parent_folder is not None:
        return self.__parent_folder
    self.__parent_folder = self.account.get_folder_by_id(self.__parent_folder_id)
    return self.__parent_folder
"def",
"parent_folder",
"(",
"self",
")",
":",
"# type: () -> Folder",
"if",
"self",
".",
"__parent_folder",
"is",
"None",
":",
"self",
".",
"__parent_folder",
"=",
"self",
".",
"account",
".",
"get_folder_by_id",
"(",
"self",
".",
"__parent_folder_id",
")",
"... | 32.888889 | 19 |
def update_app(self, app):
    """
    Loads and runs `update_initial_data` of the specified app. Any dependencies contained within the
    initial data class will be run recursively. Dependency cycles are checked for and a cache is built
    for updated apps to prevent updating the same app more than once.

    :param app: The name of the app to update. This should be the same path as defined
        in settings.INSTALLED_APPS
    :type app: str
    :raises ImportError: if the app's initial data module exists but fails to import for a
        reason other than simply having no fixtures module.
    """
    # don't update this app if it has already been updated
    if app in self.updated_apps:
        return
    # load the initial data class
    try:
        initial_data_class = self.load_app(app)
    except ImportError as e:
        message = str(e)
        # Check if this error is simply the app not having initial data
        # (heuristic: the ImportError text names a missing "fixtures" module)
        if 'No module named' in message and 'fixtures' in message:
            self.log('No initial data file for {0}'.format(app))
            return
        else:
            # This is an actual import error we should know about
            raise
    self.log('Checking dependencies for {0}'.format(app))
    # get dependency list
    dependencies = self.get_dependency_call_list(app)
    # update initial data of dependencies (recursion handles transitive deps;
    # get_dependency_call_list is expected to have rejected cycles)
    for dependency in dependencies:
        self.update_app(dependency)
    self.log('Updating app {0}'.format(app))
    # Update the initial data of the app and gather any objects returned for deletion. Objects registered for
    # deletion can either be returned from the update_initial_data function or programmatically added with the
    # register_for_deletion function in the BaseInitialData class.
    initial_data_instance = initial_data_class()
    model_objs_registered_for_deletion = initial_data_instance.update_initial_data() or []
    model_objs_registered_for_deletion.extend(initial_data_instance.get_model_objs_registered_for_deletion())
    # Add the objects to be deleted from the app to the global list of objects to be deleted.
    self.model_objs_registered_for_deletion.extend(model_objs_registered_for_deletion)
    # keep track that this app has been updated
    self.updated_apps.add(app)
"def",
"update_app",
"(",
"self",
",",
"app",
")",
":",
"# don't update this app if it has already been updated",
"if",
"app",
"in",
"self",
".",
"updated_apps",
":",
"return",
"# load the initial data class",
"try",
":",
"initial_data_class",
"=",
"self",
".",
"load_... | 44.9 | 28.06 |
def sendHeartbeat(self):
    """
    Posts the current state of each configured recording device to the
    server so it knows this service is alive.

    Heartbeats are best-effort: failures are logged and never propagate.
    """
    for name, md in self.cfg.recordingDevices.items():
        try:
            data = marshal(md, recordingDeviceFields)
            # Build the device path once; it is shared by both URLs.
            device_path = API_PREFIX + '/devices/' + name
            data['serviceURL'] = self.cfg.getServiceURL() + device_path
            targetURL = self.serverURL + device_path
            logger.info("Pinging " + targetURL)
            resp = self.httpclient.put(targetURL, json=data)
            if resp.status_code != 200:
                logger.warning("Unable to ping server at " + targetURL + " with " + str(data.keys()) +
                               ", response is " + str(resp.status_code))
            else:
                logger.info("Pinged server at " + targetURL + " with " + str(data.items()))
        except Exception:
            # Was a bare except, which also swallowed SystemExit and
            # KeyboardInterrupt; Exception keeps shutdown signals working.
            logger.exception("Unable to ping server")
"def",
"sendHeartbeat",
"(",
"self",
")",
":",
"for",
"name",
",",
"md",
"in",
"self",
".",
"cfg",
".",
"recordingDevices",
".",
"items",
"(",
")",
":",
"try",
":",
"data",
"=",
"marshal",
"(",
"md",
",",
"recordingDeviceFields",
")",
"data",
"[",
"'... | 49.2 | 22.5 |
async def blobize(self, elem=None, elem_type=None, params=None):
    """
    Run the main blobbing pass over *elem*.

    :param elem: object to process.
    :param elem_type: optional explicit type of *elem*.
    :param params: optional extra parameters for the field handler.
    :return: the serialized buffer bytes when writing, otherwise the
        value produced by the field handler.
    """
    field_result = await self.field(elem=elem, elem_type=elem_type, params=params)
    if self.writing:
        return bytes(self.iobj.buffer)
    return field_result
"async",
"def",
"blobize",
"(",
"self",
",",
"elem",
"=",
"None",
",",
"elem_type",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"if",
"self",
".",
"writing",
":",
"await",
"self",
".",
"field",
"(",
"elem",
"=",
"elem",
",",
"elem_type",
"="... | 32.769231 | 18.615385 |
def populate_local_sch_cache(self, fw_dict):
    """Populate the local scheduler cache from the FW DB after restart.

    Entries without a management IP are skipped; a device status of
    'SUCCESS' is forwarded to the scheduler as ``new=True``.
    """
    for fw_id in fw_dict:
        fw_data = fw_dict.get(fw_id)
        new = fw_data.get('device_status') == 'SUCCESS'
        mgmt_ip = fw_data.get('fw_mgmt_ip')
        if mgmt_ip is None:
            continue
        drvr_dict, mgmt_ip = self.sched_obj.populate_fw_dev(fw_id, mgmt_ip,
                                                            new)
        if drvr_dict is None or mgmt_ip is None:
            LOG.info("Pop cache for FW sch: drvr_dict or mgmt_ip "
                     "is None")
"def",
"populate_local_sch_cache",
"(",
"self",
",",
"fw_dict",
")",
":",
"for",
"fw_id",
"in",
"fw_dict",
":",
"fw_data",
"=",
"fw_dict",
".",
"get",
"(",
"fw_id",
")",
"mgmt_ip",
"=",
"fw_data",
".",
"get",
"(",
"'fw_mgmt_ip'",
")",
"dev_status",
"=",
... | 47.823529 | 14.352941 |
def DeregisterOutput(cls, output_class):
    """Deregisters an output class.

    The output classes are identified based on their NAME attribute,
    compared case-insensitively.

    Args:
      output_class (type): output module class.

    Raises:
      KeyError: if output class is not set for the corresponding data type.
    """
    name_key = output_class.NAME.lower()
    # Disabled classes take precedence; otherwise fall back to the
    # active registry.
    if name_key in cls._disabled_output_classes:
        registry = cls._disabled_output_classes
    else:
        registry = cls._output_classes
    if name_key not in registry:
        raise KeyError(
            'Output class not set for name: {0:s}.'.format(
                output_class.NAME))
    del registry[name_key]
"def",
"DeregisterOutput",
"(",
"cls",
",",
"output_class",
")",
":",
"output_class_name",
"=",
"output_class",
".",
"NAME",
".",
"lower",
"(",
")",
"if",
"output_class_name",
"in",
"cls",
".",
"_disabled_output_classes",
":",
"class_dict",
"=",
"cls",
".",
"_... | 28.458333 | 20.208333 |
def vssa(self):
    r'''The volume-specific surface area of a particle size distribution.

    Note this uses the diameters provided by the method `Dis`.

    .. math::
        \text{VSSA} = \sum_i \text{fraction}_i \frac{SA_i}{V_i}

    Returns
    -------
    VSSA : float
        The volume-specific surface area of the distribution, [m^2/m^3]

    References
    ----------
    .. [1] ISO 9276-2:2014 - Representation of Results of Particle Size
       Analysis - Part 2: Calculation of Average Particle Sizes/Diameters
       and Moments from Particle Size Distributions.
    '''
    total = 0.0
    for fraction, diameter in zip(self.fractions, self.Dis):
        volume = pi/6*diameter**3
        surface_area = pi*diameter**2
        # fraction-weighted surface-area-to-volume ratio of this size class
        total += fraction*(surface_area/volume)
    return total
"def",
"vssa",
"(",
"self",
")",
":",
"ds",
"=",
"self",
".",
"Dis",
"Vs",
"=",
"[",
"pi",
"/",
"6",
"*",
"di",
"**",
"3",
"for",
"di",
"in",
"ds",
"]",
"SAs",
"=",
"[",
"pi",
"*",
"di",
"**",
"2",
"for",
"di",
"in",
"ds",
"]",
"SASs",
... | 37.695652 | 24.478261 |
def parity_discover_next_available_nonce(
    web3: Web3,
    address: AddressHex,
) -> Nonce:
    """Returns the next available nonce for `address`.

    Queries the Parity-specific ``parity_nextNonce`` RPC, which returns
    the nonce as a hex string.
    """
    hex_nonce = web3.manager.request_blocking('parity_nextNonce', [address])
    return Nonce(int(hex_nonce, 16))
"def",
"parity_discover_next_available_nonce",
"(",
"web3",
":",
"Web3",
",",
"address",
":",
"AddressHex",
",",
")",
"->",
"Nonce",
":",
"next_nonce_encoded",
"=",
"web3",
".",
"manager",
".",
"request_blocking",
"(",
"'parity_nextNonce'",
",",
"[",
"address",
... | 40.857143 | 16.142857 |
def _recv_loop(self):
    """
    Waits for data forever and feeds the input queue, one CRLF-terminated
    line at a time.

    Exits when the peer closes the connection (empty read) or any socket
    error occurs.
    """
    while True:
        try:
            data = self._socket.recv(4096)
            if not data:
                # recv() returning empty means the remote end closed the
                # connection; without this check the loop spins forever.
                break
            self._ibuffer += data
            while '\r\n' in self._ibuffer:
                line, self._ibuffer = self._ibuffer.split('\r\n', 1)
                self.iqueue.put(line)
        except Exception:
            break
"def",
"_recv_loop",
"(",
"self",
")",
":",
"while",
"True",
":",
"try",
":",
"data",
"=",
"self",
".",
"_socket",
".",
"recv",
"(",
"4096",
")",
"self",
".",
"_ibuffer",
"+=",
"data",
"while",
"'\\r\\n'",
"in",
"self",
".",
"_ibuffer",
":",
"line",
... | 32.846154 | 12.230769 |
def add_candidate_peer_endpoints(self, peer_endpoints):
    """Adds candidate endpoints to the list of endpoints to
    attempt to peer with.

    Args:
        peer_endpoints ([str]): A list of public uri's which the
            validator can attempt to peer with.
    """
    if not self._topology:
        LOGGER.debug("Could not add peer endpoints to topology. "
                     "ConnectionManager does not exist.")
        return
    self._topology.add_candidate_peer_endpoints(peer_endpoints)
"def",
"add_candidate_peer_endpoints",
"(",
"self",
",",
"peer_endpoints",
")",
":",
"if",
"self",
".",
"_topology",
":",
"self",
".",
"_topology",
".",
"add_candidate_peer_endpoints",
"(",
"peer_endpoints",
")",
"else",
":",
"LOGGER",
".",
"debug",
"(",
"\"Coul... | 40.769231 | 19.538462 |
def limit_mem(limit=(4 * 1024**3)):
    """Set the process soft RLIMIT_DATA memory limit (default 4 GiB).

    The hard limit is left unchanged; the new soft limit is verified
    and logged.
    """
    rsrc = resource.RLIMIT_DATA
    old_soft, hard = resource.getrlimit(rsrc)
    resource.setrlimit(rsrc, (limit, hard))  # 4GB by default
    new_soft, _ = resource.getrlimit(rsrc)
    assert new_soft == limit
    _log = logging.getLogger(__name__)
    _log.debug('Set soft memory limit: %s => %s', old_soft, new_soft)
"def",
"limit_mem",
"(",
"limit",
"=",
"(",
"4",
"*",
"1024",
"**",
"3",
")",
")",
":",
"rsrc",
"=",
"resource",
".",
"RLIMIT_DATA",
"soft",
",",
"hard",
"=",
"resource",
".",
"getrlimit",
"(",
"rsrc",
")",
"resource",
".",
"setrlimit",
"(",
"rsrc",
... | 35.4 | 11.8 |
def add_from_depend(self, node, from_module):
    """Record a dependency created by a from-import.

    Adds *from_module* to the importing module's ``depends`` list,
    skipping duplicates.
    """
    importing = self.module(node.root().name)
    depends = importing.node.depends
    if from_module not in depends:
        depends.append(from_module)
"def",
"add_from_depend",
"(",
"self",
",",
"node",
",",
"from_module",
")",
":",
"mod_name",
"=",
"node",
".",
"root",
"(",
")",
".",
"name",
"obj",
"=",
"self",
".",
"module",
"(",
"mod_name",
")",
"if",
"from_module",
"not",
"in",
"obj",
".",
"nod... | 38.857143 | 4.285714 |
def extract_attr_for_match(items, **kwargs):
    """Helper method to get attribute value for an item matching some criterion.

    Specify target criteria as keyword args, with the attribute to extract
    given the value -1. At most one item may match.

    Example:
      to extract state of vpc matching given vpc id
      response = [{'State': 'available', 'VpcId': 'vpc-2bb1584c'}]
      extract_attr_for_match(response, State=-1, VpcId='vpc-2bb1584c') #=> 'available'"""
    # Locate the attribute to return (the single -1-valued kwarg).
    target = None
    for key, value in kwargs.items():
        if value == -1:
            assert target is None, "Only single query arg (-1 valued) is allowed"
            target = key
    matched = []
    criteria = set(kwargs.keys())
    for record in items:
        # Every record must carry all criteria keys (including the target).
        assert criteria.issubset(
            record.keys()), "Filter set contained %s which was not in record %s" % (
                criteria.difference(record.keys()),
                record)
        is_match = True
        for key in record:
            if key == target or key not in kwargs:
                continue
            if record[key] != kwargs[key]:
                is_match = False
                break
        if is_match:
            matched.append(record[target])
    assert len(matched) <= 1, "%d values matched %s, only allow 1" % (
        len(matched), kwargs)
    return matched[0] if matched else None
"def",
"extract_attr_for_match",
"(",
"items",
",",
"*",
"*",
"kwargs",
")",
":",
"# find the value of attribute to return",
"query_arg",
"=",
"None",
"for",
"arg",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"value",
"==",
"-",
"1",
"... | 30.769231 | 20.333333 |
def __register_driver(self, channel, webdriver):
    """Register *webdriver* under *channel*.

    The driver is appended to the channel's cleanup list and becomes the
    channel's current (singleton) driver instance.
    """
    # Add to list of webdrivers to cleanup.
    # Note: dict.has_key() was removed in Python 3; the `in` test (via
    # setdefault here) behaves identically on both Python 2 and 3.
    self.__registered_drivers.setdefault(channel, []).append(webdriver)
    # Set singleton instance for the channel
    self.__webdriver[channel] = webdriver
"def",
"__register_driver",
"(",
"self",
",",
"channel",
",",
"webdriver",
")",
":",
"# Add to list of webdrivers to cleanup.",
"if",
"not",
"self",
".",
"__registered_drivers",
".",
"has_key",
"(",
"channel",
")",
":",
"self",
".",
"__registered_drivers",
"[",
"c... | 38.636364 | 20.454545 |
def send_email_confirmation_instructions(self, user):
    """
    Sends the confirmation instructions email for the specified user and
    emits the `confirm_instructions_sent` signal.

    :param user: The user to send the instructions to.
    """
    confirmation_token = self.security_utils_service.generate_confirmation_token(user)
    link = url_for('security_controller.confirm_email',
                   token=confirmation_token, _external=True)
    subject = _('flask_unchained.bundles.security:email_subject.email_confirmation_instructions')
    self.send_mail(subject,
                   to=user.email,
                   template='security/email/email_confirmation_instructions.html',
                   user=user,
                   confirmation_link=link)
    confirm_instructions_sent.send(app._get_current_object(), user=user,
                                   token=confirmation_token)
"def",
"send_email_confirmation_instructions",
"(",
"self",
",",
"user",
")",
":",
"token",
"=",
"self",
".",
"security_utils_service",
".",
"generate_confirmation_token",
"(",
"user",
")",
"confirmation_link",
"=",
"url_for",
"(",
"'security_controller.confirm_email'",
... | 46.578947 | 23.210526 |
def _remove_empty_items(d, required):
    """Return a new dict with any empty items removed.

    Note that this is not a deep check. If d contains a dictionary which
    itself contains empty items, those are never checked.

    Keys listed in *required* are always kept (for example,
    TaskDescriptors always emit the "task-id", even if None), and int
    values are kept even when zero.

    Args:
      d: a dictionary
      required: list of keys to keep regardless of value

    Returns:
      A dictionary with empty items removed.
    """
    return {
        k: v
        for k, v in d.items()
        # isinstance(v, int) keeps int(0), which "if v" alone would drop
        if k in required or isinstance(v, int) or v
    }
"def",
"_remove_empty_items",
"(",
"d",
",",
"required",
")",
":",
"new_dict",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"required",
":",
"new_dict",
"[",
"k",
"]",
"=",
"v",
"elif",
"isinstance... | 28.821429 | 22.571429 |
def _init_object(self, catalog_id, proxy, runtime, db_name, cat_name, cat_class):
    """Initialize this session as an OsidObject-based session.

    Resolves the catalog this session operates on, either through an
    external cataloging provider (when one is configured) or directly
    from the JSON/Mongo backing collection, falling back to creating an
    orchestrated catalog or the phantom root catalog as needed.

    :param catalog_id: Id of the catalog to bind to, or None/phantom root
    :param proxy: proxy forwarded to _init_proxy_and_runtime
    :param runtime: runtime forwarded to _init_proxy_and_runtime
    :param db_name: backing database name (also the config parameter prefix)
    :param cat_name: catalog collection name within the database
    :param cat_class: class used to wrap the catalog map when no
        cataloging provider is in use
    :raises errors.NotFound: if the catalog id cannot be resolved and is
        in this service's own namespace (so orchestration is not attempted)
    """
    self._catalog_identifier = None
    self._init_proxy_and_runtime(proxy, runtime)
    uses_cataloging = False
    if catalog_id is not None and catalog_id.get_identifier() != PHANTOM_ROOT_IDENTIFIER:
        self._catalog_identifier = catalog_id.get_identifier()
        config = self._runtime.get_configuration()
        parameter_id = Id('parameter:' + db_name + 'CatalogingProviderImpl@mongo')
        try:
            # A configured cataloging provider takes precedence over the
            # direct collection lookup below.
            provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
        except (AttributeError, KeyError, errors.NotFound):
            # No cataloging provider configured: look the catalog up
            # directly in the backing collection.
            collection = JSONClientValidated(db_name,
                                             collection=cat_name,
                                             runtime=self._runtime)
            try:
                self._my_catalog_map = collection.find_one({'_id': ObjectId(self._catalog_identifier)})
            except errors.NotFound:
                # Unknown id from a foreign namespace: create an
                # orchestrated catalog for it; otherwise it is an error.
                if catalog_id.get_identifier_namespace() != db_name + '.' + cat_name:
                    self._my_catalog_map = self._create_orchestrated_cat(catalog_id, db_name, cat_name)
                else:
                    raise errors.NotFound('could not find catalog identifier ' + catalog_id.get_identifier() + cat_name)
        else:
            uses_cataloging = True
            cataloging_manager = self._runtime.get_manager('CATALOGING',
                                                           provider_impl)  # need to add version argument
            lookup_session = cataloging_manager.get_catalog_lookup_session()
            # self._my_catalog_map = lookup_session.get_catalog(catalog_id)._my_map
            # self._catalog = Catalog(osid_object_map=self._my_catalog_map, runtime=self._runtime,
            #                         proxy=self._proxy)
            self._catalog = lookup_session.get_catalog(catalog_id)
    else:
        # No (real) catalog id given: fall back to the phantom root catalog.
        self._catalog_identifier = PHANTOM_ROOT_IDENTIFIER
        self._my_catalog_map = make_catalog_map(cat_name, identifier=self._catalog_identifier)
    if not uses_cataloging:
        self._catalog = cat_class(osid_object_map=self._my_catalog_map, runtime=self._runtime, proxy=self._proxy)
        self._catalog._authority = self._authority  # there should be a better way...
        self._catalog_id = self._catalog.get_id()
    self._forms = dict()
"def",
"_init_object",
"(",
"self",
",",
"catalog_id",
",",
"proxy",
",",
"runtime",
",",
"db_name",
",",
"cat_name",
",",
"cat_class",
")",
":",
"self",
".",
"_catalog_identifier",
"=",
"None",
"self",
".",
"_init_proxy_and_runtime",
"(",
"proxy",
",",
"run... | 58.431818 | 33.431818 |
def add(self, line):
    """
    Append *line* to contents, where *line* is an entire line or a list
    of lines. Multi-line strings are split on newlines. When
    ``self.unique`` is True, lines already present are skipped; when
    False, lines are appended unconditionally.

    :param line: String or List of Strings; arbitrary string(s) to append to file contents.
    :return: Boolean; whether contents were changed during this method call.
    :raises AttributeError: if ``self.unique`` is not a bool.
    :raises TypeError: if *line* is neither a string nor a list.
    """
    if self.unique is not True and self.unique is not False:
        raise AttributeError("Attribute 'unique' is not True or False.")
    self.log('add({0}); unique={1}'.format(line, self.unique))
    if line is False:
        return False
    if isinstance(line, str):
        line = line.split('\n')
    if not isinstance(line, list):
        raise TypeError("Parameter 'line' not a 'string' or 'list', is {0}".format(type(line)))
    modified = False
    for entry in line:
        if self.unique and entry in self.contents:
            continue
        self.contents.append(entry)
        self.changed = modified = True
    if self.sorted and modified:
        self.sort()
    return modified
"def",
"add",
"(",
"self",
",",
"line",
")",
":",
"if",
"self",
".",
"unique",
"is",
"not",
"False",
"and",
"self",
".",
"unique",
"is",
"not",
"True",
":",
"raise",
"AttributeError",
"(",
"\"Attribute 'unique' is not True or False.\"",
")",
"self",
".",
"... | 41.965517 | 21 |
def add_view_menu(self, name):
    """
    Adds a view or menu to the backend, model view_menu.

    :param name: name of the view menu to add
    :return: the existing or newly created view menu; on a creation
        failure, whatever the partially-built value holds.
    """
    view_menu = self.find_view_menu(name)
    if view_menu is not None:
        return view_menu
    try:
        view_menu = self.viewmenu_model()
        view_menu.name = name
        self.get_session.add(view_menu)
        self.get_session.commit()
    except Exception as e:
        # Creation failed: log and undo the pending transaction.
        log.error(c.LOGMSG_ERR_SEC_ADD_VIEWMENU.format(str(e)))
        self.get_session.rollback()
    return view_menu
"def",
"add_view_menu",
"(",
"self",
",",
"name",
")",
":",
"view_menu",
"=",
"self",
".",
"find_view_menu",
"(",
"name",
")",
"if",
"view_menu",
"is",
"None",
":",
"try",
":",
"view_menu",
"=",
"self",
".",
"viewmenu_model",
"(",
")",
"view_menu",
".",
... | 36.111111 | 9.888889 |
def examples(self):
    """Yield ``(name, element)`` example pairs combining all sub-spaces.

    Each example takes one named example from every sub-space; the names
    are comma-joined and the per-space elements are wrapped into an
    element of this product space.
    """
    per_space = [spc.examples for spc in self.spaces]
    for combo in product(*per_space):
        labels = [label for label, _ in combo]
        parts = [part for _, part in combo]
        yield (', '.join(labels), self.element(parts))
"def",
"examples",
"(",
"self",
")",
":",
"for",
"examples",
"in",
"product",
"(",
"*",
"[",
"spc",
".",
"examples",
"for",
"spc",
"in",
"self",
".",
"spaces",
"]",
")",
":",
"name",
"=",
"', '",
".",
"join",
"(",
"name",
"for",
"name",
",",
"_",... | 49.666667 | 17.333333 |
def exhaustive_ontology_ilx_diff_row_only( self, ontology_row: dict ) -> list:
    ''' Diff one external ontology row against every existing-ids row.

    WARNING: runtime is awful — the single row is diffed against every
    row of ``self.existing_ids`` one at a time.

    :param ontology_row: a single external ontology record.
    :return: the per-row diff results flagged as 'same' (note: returns a
        list, not a dict as previously annotated).
    '''
    results = []
    # itertuples yields the index first, so prepend it to the column names.
    header = ['Index'] + list(self.existing_ids.columns)
    for row in self.existing_ids.itertuples():
        row = {header[i]:val for i, val in enumerate(row)}
        check_list = [
            {
                'external_ontology_row': ontology_row,
                'ilx_rows': [row],
            },
        ]
        # First layer for each external row. Second is for each potential ilx row. It's simple here 1-1.
        result = self.__exhaustive_diff(check_list)[0][0]
        if result['same']:
            results.append(result)
    return results
"def",
"exhaustive_ontology_ilx_diff_row_only",
"(",
"self",
",",
"ontology_row",
":",
"dict",
")",
"->",
"dict",
":",
"results",
"=",
"[",
"]",
"header",
"=",
"[",
"'Index'",
"]",
"+",
"list",
"(",
"self",
".",
"existing_ids",
".",
"columns",
")",
"for",
... | 43.529412 | 19.764706 |
def retrieve_bicluster(self, df, row_no, column_no):
    """
    Extract one bicluster from the input dataframe using the fitted model.

    :param df: the input dataframe whose values were biclustered
    :param row_no: index of the row bicluster
    :param column_no: index of the column bicluster
    :return: the sub-dataframe covered by the selected row and column clusters
    """
    row_mask = self.model.biclusters_[0][row_no]
    col_mask = self.model.biclusters_[1][column_no]
    selected_rows = df[row_mask]
    return selected_rows[selected_rows.columns[col_mask]]
"def",
"retrieve_bicluster",
"(",
"self",
",",
"df",
",",
"row_no",
",",
"column_no",
")",
":",
"res",
"=",
"df",
"[",
"self",
".",
"model",
".",
"biclusters_",
"[",
"0",
"]",
"[",
"row_no",
"]",
"]",
"bicluster",
"=",
"res",
"[",
"res",
".",
"colu... | 48.833333 | 23 |
def quality(self):
    """Return a dict filled with metrics related to the inner
    quality of the dataset:

    * update frequency / days until next planned update
    * number of tags, description length
    * resource presence, formats and availability
    * discussion count and whether any lack owner involvement

    Returns an empty dict for unsaved datasets. The aggregate mark is
    stored under the ``score`` key.
    """
    from udata.models import Discussion  # noqa: Prevent circular imports
    result = {}
    if not self.id:
        # Quality is only relevant on saved Datasets
        return result
    if self.next_update:
        result['frequency'] = self.frequency
        # Negative while the next planned update is still in the future.
        result['update_in'] = -(self.next_update - datetime.now()).days
    if self.tags:
        result['tags_count'] = len(self.tags)
    if self.description:
        result['description_length'] = len(self.description)
    if self.resources:
        result['has_resources'] = True
        result['has_only_closed_or_no_formats'] = all(
            resource.closed_or_no_format for resource in self.resources)
        result['has_unavailable_resources'] = not all(
            self.check_availability())
    discussions = Discussion.objects(subject=self)
    if discussions:
        result['discussions'] = len(discussions)
        # A discussion counts as untreated when the owner is not involved.
        result['has_untreated_discussions'] = not all(
            discussion.person_involved(self.owner)
            for discussion in discussions)
    result['score'] = self.compute_quality_score(result)
    return result
"def",
"quality",
"(",
"self",
")",
":",
"from",
"udata",
".",
"models",
"import",
"Discussion",
"# noqa: Prevent circular imports",
"result",
"=",
"{",
"}",
"if",
"not",
"self",
".",
"id",
":",
"# Quality is only relevant on saved Datasets",
"return",
"result",
"... | 39.742857 | 16.657143 |
def add_options(self):
    """ Add program options.

    Extends the inherited option set with the dry-run/foreground
    switches and the daemon lifecycle flags (--stop, --restart,
    --status, --pid-file, --guard-file).
    """
    super(RtorrentQueueManager, self).add_options()
    # Populated later during startup; declared here so they always exist.
    self.jobs = None
    self.httpd = None
    # basic options
    self.add_bool_option("-n", "--dry-run",
        help="advise jobs not to do any real work, just tell what would happen")
    self.add_bool_option("--no-fork", "--fg", help="Don't fork into background (stay in foreground and log to console)")
    self.add_bool_option("--stop", help="Stop running daemon")
    self.add_bool_option("--restart", help="Stop running daemon, then fork into background")
    self.add_bool_option("-?", "--status", help="Check daemon status")
    self.add_value_option("--pid-file", "PATH",
        help="file holding the process ID of the daemon, when running in background")
    self.add_value_option("--guard-file", "PATH",
        help="guard file for the process watchdog")
"def",
"add_options",
"(",
"self",
")",
":",
"super",
"(",
"RtorrentQueueManager",
",",
"self",
")",
".",
"add_options",
"(",
")",
"self",
".",
"jobs",
"=",
"None",
"self",
".",
"httpd",
"=",
"None",
"# basic options",
"self",
".",
"add_bool_option",
"(",
... | 51.722222 | 25.555556 |
def calc_requiredremoterelease_v2(self):
    """Get the required remote release of the last simulation step.

    Required log sequence:
      |LoggedRequiredRemoteRelease|

    Calculated flux sequence:
      |RequiredRemoteRelease|

    Basic equation:
      :math:`RequiredRemoteRelease = LoggedRequiredRemoteRelease`

    Example:

        >>> from hydpy.models.dam import *
        >>> parameterstep()
        >>> logs.loggedrequiredremoterelease = 3.0
        >>> model.calc_requiredremoterelease_v2()
        >>> fluxes.requiredremoterelease
        requiredremoterelease(3.0)
    """
    fluxes = self.sequences.fluxes.fastaccess
    logs = self.sequences.logs.fastaccess
    # Copy the single logged value into the flux sequence.
    fluxes.requiredremoterelease = logs.loggedrequiredremoterelease[0]
"def",
"calc_requiredremoterelease_v2",
"(",
"self",
")",
":",
"flu",
"=",
"self",
".",
"sequences",
".",
"fluxes",
".",
"fastaccess",
"log",
"=",
"self",
".",
"sequences",
".",
"logs",
".",
"fastaccess",
"flu",
".",
"requiredremoterelease",
"=",
"log",
".",... | 29.958333 | 15.958333 |
def threshold_image(img, bkground_thresh, bkground_value=0.0):
    """
    Thresholds a given image at a value or percentile.

    Elements below the threshold are replaced *in place* with
    ``bkground_value``; the (modified) array is also returned.

    Parameters
    -----------
    img : ndarray
        Input image. NOTE: mutated in place when a threshold applies.
    bkground_thresh : float, int, str or None
        A threshold value to identify the background: a number, a
        percentile string such as ``"5%"``, or None to skip
        thresholding entirely.
    bkground_value : float
        A value to fill the background elements with. Default 0.

    Returns
    -------
    thresholded_image : ndarray
        Thresholded and/or filled image.

    Raises
    ------
    ValueError
        If ``bkground_thresh`` is neither None, a number, nor a
        parseable percentile string.
    """
    if bkground_thresh is None:
        return img
    if isinstance(bkground_thresh, str):
        try:
            thresh_perc = float(bkground_thresh.replace('%', ''))
        # Catch only the failed float conversion; the original bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit.
        except ValueError as exc:
            raise ValueError(
                'percentile specified could not be parsed correctly '
                ' - must be a string of the form "5%", "10%" etc') from exc
        else:
            thresh_value = np.percentile(img, thresh_perc)
    elif isinstance(bkground_thresh, (float, int)):
        thresh_value = bkground_thresh
    else:
        raise ValueError('Invalid specification for background threshold.')
    img[img < thresh_value] = bkground_value
    return img
"def",
"threshold_image",
"(",
"img",
",",
"bkground_thresh",
",",
"bkground_value",
"=",
"0.0",
")",
":",
"if",
"bkground_thresh",
"is",
"None",
":",
"return",
"img",
"if",
"isinstance",
"(",
"bkground_thresh",
",",
"str",
")",
":",
"try",
":",
"thresh_perc... | 25.021739 | 23.23913 |
def show_disk(name, call=None):
    '''
    Show the disk details of the instance
    CLI Examples:
    .. code-block:: bash
        salt-cloud -a show_disk aliyun myinstance
    '''
    # This function is only valid as an instance action.
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_disks action must be called with -a or --action.'
        )
    query_result = query(params={
        'Action': 'DescribeInstanceDisks',
        'InstanceId': name,
    })
    # One entry per disk id, every attribute rendered as text.
    return {
        disk['DiskId']: {field: six.text_type(disk[field]) for field in disk}
        for disk in query_result['Disks']['Disk']
    }
"def",
"show_disk",
"(",
"name",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"!=",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The show_disks action must be called with -a or --action.'",
")",
"ret",
"=",
"{",
"}",
"params",
"=",
"{",
"'Action'... | 22.142857 | 22.714286 |
def show_network(self):
    """!
    @brief Shows structure of the network: neurons and connections between them.
    @details Only networks whose oscillators are located in 2-d or 3-d
             space can be drawn; any other dimensionality raises an error.
    """
    # Dimensionality is inferred from the first oscillator's coordinates.
    dimension = len(self.__location[0])
    if (dimension != 3) and (dimension != 2):
        raise NameError('Network that is located in different from 2-d and 3-d dimensions can not be represented')
    (fig, axes) = self.__create_surface(dimension)
    for i in range(0, self.__num_osc, 1):
        if dimension == 2:
            axes.plot(self.__location[i][0], self.__location[i][1], 'bo')
            for j in range(i, self.__num_osc, 1):   # draw connection between two points only one time (j starts at i)
                # a positive weight marks a connection between oscillators i and j
                if self.__weights[i][j] > 0.0:
                    axes.plot([self.__location[i][0], self.__location[j][0]], [self.__location[i][1], self.__location[j][1]], 'b-', linewidth = 0.5)
        elif dimension == 3:
            axes.scatter(self.__location[i][0], self.__location[i][1], self.__location[i][2], c = 'b', marker = 'o')
            for j in range(i, self.__num_osc, 1):   # draw connection between two points only one time (j starts at i)
                if self.__weights[i][j] > 0.0:
                    axes.plot([self.__location[i][0], self.__location[j][0]], [self.__location[i][1], self.__location[j][1]], [self.__location[i][2], self.__location[j][2]], 'b-', linewidth = 0.5)
    plt.grid()
    plt.show()
"def",
"show_network",
"(",
"self",
")",
":",
"dimension",
"=",
"len",
"(",
"self",
".",
"__location",
"[",
"0",
"]",
")",
"if",
"(",
"dimension",
"!=",
"3",
")",
"and",
"(",
"dimension",
"!=",
"2",
")",
":",
"raise",
"NameError",
"(",
"'Network that... | 53.392857 | 35.357143 |
def _update_class(self, oldclass, newclass):
    """Update a class object in place from its reloaded counterpart."""
    old_ns = oldclass.__dict__
    new_ns = newclass.__dict__
    old_names = set(old_ns)
    new_names = set(new_ns)
    # Attributes that exist only on the new class are simply attached.
    for added in new_names - old_names:
        setattr(oldclass, added, new_ns[added])
        notify_info0('Added:', added, 'to', oldclass)
        self.found_change = True
    # Note: not removing old things...
    # for name in oldnames - newnames:
    #     notify_info('Removed:', name, 'from', oldclass)
    #     delattr(oldclass, name)
    # Attributes present on both sides are updated one by one.
    for shared in (old_names & new_names) - {'__dict__', '__doc__'}:
        self._update(oldclass, shared, old_ns[shared], new_ns[shared], is_class_namespace=True)
    # Base-class changes cannot be applied in place; warn the user instead.
    if str(getattr(oldclass, '__bases__', None)) != str(getattr(newclass, '__bases__', None)):
        notify_error('Changing the hierarchy of a class is not supported. %s may be inconsistent.' % (oldclass,))
    self._handle_namespace(oldclass, is_class_namespace=True)
"def",
"_update_class",
"(",
"self",
",",
"oldclass",
",",
"newclass",
")",
":",
"olddict",
"=",
"oldclass",
".",
"__dict__",
"newdict",
"=",
"newclass",
".",
"__dict__",
"oldnames",
"=",
"set",
"(",
"olddict",
")",
"newnames",
"=",
"set",
"(",
"newdict",
... | 40 | 20.592593 |
def get_next_version() -> str:
    """
    Returns: next version for this Git repository
    """
    LOGGER.info('computing next version')
    # Anything off the master branch gets an alpha pre-release version.
    is_alpha = bool(CTX.repo.get_current_branch() != 'master')
    LOGGER.info('alpha: %s', is_alpha)
    calver = _get_calver()
    LOGGER.info('current calver: %s', calver)
    matching_tags = _get_current_calver_tags(calver)
    LOGGER.info('found %s matching tags for this calver', len(matching_tags))
    stable_version = _next_stable_version(calver, matching_tags)
    LOGGER.info('next stable version: %s', stable_version)
    if not is_alpha:
        return stable_version
    return _next_alpha_version(stable_version, matching_tags)
"def",
"get_next_version",
"(",
")",
"->",
"str",
":",
"LOGGER",
".",
"info",
"(",
"'computing next version'",
")",
"should_be_alpha",
"=",
"bool",
"(",
"CTX",
".",
"repo",
".",
"get_current_branch",
"(",
")",
"!=",
"'master'",
")",
"LOGGER",
".",
"info",
... | 40.882353 | 15.470588 |
def get_victoria_day(self, year):
    """
    Return Victoria Day for Edinburgh.
    Set to the Monday strictly before May 24th. It means that if May 24th
    is a Monday, it's shifted to the week before.
    """
    anchor = date(year, 5, 24)
    # weekday() is 0 for Monday; a zero shift would land on May 24th
    # itself, so push a full week back in that case ("strictly before").
    days_back = anchor.weekday()
    if days_back == 0:
        days_back = 7
    return (anchor - timedelta(days=days_back), "Victoria Day")
"def",
"get_victoria_day",
"(",
"self",
",",
"year",
")",
":",
"may_24th",
"=",
"date",
"(",
"year",
",",
"5",
",",
"24",
")",
"# Since \"MON(day) == 0\", it's either the difference between MON and the",
"# current weekday (starting at 0), or 7 days before the May 24th",
"shi... | 42.538462 | 15 |
def download(url, tries=DEFAULT_TRIES, retry_delay=RETRY_DELAY,
             try_timeout=None, proxies=None, verify=True):
    """
    Download a file over HTTP, in one or more attempts.

    Args:
        url (str): URL (HTTP schema) of the file to download.
        tries (int): Number of attempts to perform; must be >= 1.
        retry_delay (int or float): Seconds to wait between attempts.
        try_timeout (int or float): Maximum time to wait per attempt.
        proxies (dict): Proxies to use. The dictionary must map 'http'
            and 'https' to the corresponding proxy URLs.
        verify (bool): Whether to verify TLS certificates.

    Returns:
        bytes: File content.

    Raises:
        ValueError: If ``tries`` is less than 1.
        Exception: The last error raised, if every attempt failed.
    """
    # Guard: with tries < 1 the loop body never runs and the original
    # code raised UnboundLocalError on `download_exception`.
    if tries < 1:
        raise ValueError('tries must be >= 1, got %r' % (tries,))
    for attempt in range(tries):
        try:
            return requests.get(url, timeout=try_timeout, proxies=proxies,
                                verify=verify).content
        except Exception as e:  # deliberate catch-all: any failure triggers a retry
            download_exception = e
            if attempt < tries - 1:
                time.sleep(retry_delay)
    # All attempts failed; surface the last error.
    raise download_exception
"def",
"download",
"(",
"url",
",",
"tries",
"=",
"DEFAULT_TRIES",
",",
"retry_delay",
"=",
"RETRY_DELAY",
",",
"try_timeout",
"=",
"None",
",",
"proxies",
"=",
"None",
",",
"verify",
"=",
"True",
")",
":",
"for",
"i",
"in",
"range",
"(",
"tries",
")",... | 36.344828 | 21.862069 |
def post_login(cookie, tokens, username, password, rsakey, verifycode='',
               codestring=''):
    '''Perform the login validation request.
    password - base64 string of the RSA-encrypted password
    rsakey - the rsakey matching the public_key used for encryption
    verifycode - captcha text, empty by default
    codestring - captcha identifier returned by the server, empty by default
    @return (status, info), where status is one of:
    0 - OK; info holds the auth_cookie
    -1 - unknown error
    4 - wrong password
    257 - captcha input required; info holds (vcodetype, codeString))
    '''
    url = const.PASSPORT_LOGIN
    # Form body is assembled as one pre-encoded query string.
    data = ''.join([
        'staticpage=https%3A%2F%2Fpassport.baidu.com%2Fstatic%2Fpasspc-account%2Fhtml%2Fv3Jump.html',
        '&charset=UTF-8',
        '&token=', tokens['token'],
        '&tpl=pp&subpro=&apiver=v3',
        '&tt=', util.timestamp(),
        '&codestring=', codestring,
        '&safeflg=0&u=http%3A%2F%2Fpassport.baidu.com%2F',
        '&isPhone=',
        '&quick_user=0&logintype=basicLogin&logLoginType=pc_loginBasic&idc=',
        '&loginmerge=true',
        '&username=', encoder.encode_uri_component(username),
        '&password=', encoder.encode_uri_component(password),
        '&verifycode=', verifycode,
        '&mem_pass=on',
        '&rsakey=', rsakey,
        '&crypttype=12',
        '&ppui_logintime=',get_ppui_logintime(),
        '&callback=parent.bd__pcbs__28g1kg',
    ])
    headers={
        'Accept': const.ACCEPT_HTML,
        'Cookie': cookie.sub_output('BAIDUID','HOSUPPORT', 'UBI'),
        'Referer': const.REFERER,
        'Connection': 'Keep-Alive',
    }
    req = net.urlopen(url, headers=headers, data=data.encode())
    if req:
        content= req.data.decode()
        # Extract the quoted "err_no..." query string embedded in the
        # response body (the server replies via a JS callback wrapper).
        match = re.search('"(err_no[^"]+)"', content)
        if not match:
            return (-1, None)
        query = dict(urllib.parse.parse_qsl(match.group(1)))
        query['err_no'] = int(query['err_no'])
        err_no = query['err_no']
        auth_cookie = req.headers.get_all('Set-Cookie')
        if err_no == 0:
            return (0, auth_cookie)
        # #!! not bind cellphone: still treated as a successful login
        elif err_no == 18:
            return (0, auth_cookie)
        # captcha input required
        elif err_no == 257:
            return (err_no, query)
        # SMS verification required
        elif err_no == 400031:
            return (err_no, query)
        else:
            return (err_no, None)
    else:
        return (-1, None)
    # Unreachable: both branches above return; kept as a defensive fallback.
    return (-1, None)
"def",
"post_login",
"(",
"cookie",
",",
"tokens",
",",
"username",
",",
"password",
",",
"rsakey",
",",
"verifycode",
"=",
"''",
",",
"codestring",
"=",
"''",
")",
":",
"url",
"=",
"const",
".",
"PASSPORT_LOGIN",
"data",
"=",
"''",
".",
"join",
"(",
... | 33.059701 | 16.671642 |
def load_term_config(filter_name,
                     term_name,
                     filter_options=None,
                     pillar_key='acl',
                     pillarenv=None,
                     saltenv=None,
                     merge_pillar=True,
                     revision_id=None,
                     revision_no=None,
                     revision_date=True,
                     revision_date_format='%Y/%m/%d',
                     test=False,
                     commit=True,
                     debug=False,
                     source_service=None,
                     destination_service=None,
                     **term_fields):
    '''
    Generate and load the configuration of a policy term.
    filter_name
        The name of the policy filter.
    term_name
        The name of the term.
    filter_options
        Additional filter options. These options are platform-specific.
        See the complete list of options_.
        .. _options: https://github.com/google/capirca/wiki/Policy-format#header-section
    pillar_key: ``acl``
        The key in the pillar containing the default attributes values. Default: ``acl``.
        If the pillar contains the following structure:
        .. code-block:: yaml
            firewall:
              - my-filter:
                  terms:
                    - my-term:
                        source_port: 1234
                        source_address:
                            - 1.2.3.4/32
                            - 5.6.7.8/32
        The ``pillar_key`` field would be specified as ``firewall``.
    pillarenv
        Query the master to generate fresh pillar data on the fly,
        specifically from the requested pillar environment.
    saltenv
        Included only for compatibility with
        :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
    merge_pillar: ``True``
        Merge the CLI variables with the pillar. Default: ``True``.
        The properties specified through the CLI have higher priority than the pillar.
    revision_id
        Add a comment in the term config having the description for the changes applied.
    revision_no
        The revision count.
    revision_date: ``True``
        Boolean flag: display the date when the term configuration was generated. Default: ``True``.
    revision_date_format: ``%Y/%m/%d``
        The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
    test: ``False``
        Dry run? If set as ``True``, will apply the config, discard and return the changes.
        Default: ``False`` and will commit the changes on the device.
    commit: ``True``
        Commit? Default: ``True``.
    debug: ``False``
        Debug mode. Will insert a new key under the output dictionary,
        as ``loaded_config`` containing the raw configuration loaded on the device.
    source_service
        A special service to choose from. This is a helper so the user is able to
        select a source just using the name, instead of specifying a source_port and protocol.
        As this module is available on Unix platforms only,
        it reads the IANA_ port assignment from /etc/services.
        If the user requires additional shortcuts to be referenced, they can add entries under /etc/services,
        which can be managed using the :mod:`file state <salt.states.file>`.
        .. _IANA: http://www.iana.org/assignments/port-numbers
    destination_service
        A special service to choose from. This is a helper so the user is able to
        select a source just using the name, instead of specifying a destination_port and protocol.
        Allows the same options as ``source_service``.
    term_fields
        Term attributes. To see what fields are supported, please consult the
        list of supported keywords_. Some platforms have a few other optional_
        keywords.
        .. _keywords: https://github.com/google/capirca/wiki/Policy-format#keywords
        .. _optional: https://github.com/google/capirca/wiki/Policy-format#optionally-supported-keywords
    .. note::
        The following fields are accepted (some being platform-specific):
        - action
        - address
        - address_exclude
        - comment
        - counter
        - expiration
        - destination_address
        - destination_address_exclude
        - destination_port
        - destination_prefix
        - forwarding_class
        - forwarding_class_except
        - logging
        - log_name
        - loss_priority
        - option
        - policer
        - port
        - precedence
        - principals
        - protocol
        - protocol_except
        - qos
        - pan_application
        - routing_instance
        - source_address
        - source_address_exclude
        - source_port
        - source_prefix
        - verbatim
        - packet_length
        - fragment_offset
        - hop_limit
        - icmp_type
        - ether_type
        - traffic_class_count
        - traffic_type
        - translated
        - dscp_set
        - dscp_match
        - dscp_except
        - next_ip
        - flexible_match_range
        - source_prefix_except
        - destination_prefix_except
        - vpn
        - source_tag
        - destination_tag
        - source_interface
        - destination_interface
        - flattened
        - flattened_addr
        - flattened_saddr
        - flattened_daddr
        - priority
    .. note::
        The following fields can be also a single value and a list of values:
        - action
        - address
        - address_exclude
        - comment
        - destination_address
        - destination_address_exclude
        - destination_port
        - destination_prefix
        - forwarding_class
        - forwarding_class_except
        - logging
        - option
        - port
        - precedence
        - principals
        - protocol
        - protocol_except
        - pan_application
        - source_address
        - source_address_exclude
        - source_port
        - source_prefix
        - verbatim
        - icmp_type
        - ether_type
        - traffic_type
        - dscp_match
        - dscp_except
        - flexible_match_range
        - source_prefix_except
        - destination_prefix_except
        - source_tag
        - destination_tag
        - source_service
        - destination_service
        Example: ``destination_address`` can be either defined as:
        .. code-block:: yaml
            destination_address: 172.17.17.1/24
        or as a list of destination IP addresses:
        .. code-block:: yaml
            destination_address:
                - 172.17.17.1/24
                - 172.17.19.1/24
        or a list of services to be matched:
        .. code-block:: yaml
            source_service:
                - ntp
                - snmp
                - ldap
                - bgpd
    .. note::
        The port fields ``source_port`` and ``destination_port`` can be used as above to select either
        a single value, either a list of values, but also they can select port ranges. Example:
        .. code-block:: yaml
            source_port:
              - - 1000
                - 2000
              - - 3000
                - 4000
        With the configuration above, the user is able to select the 1000-2000 and 3000-4000 source port ranges.
    The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
    CLI Example:
    .. code-block:: bash
        salt 'edge01.bjm01' netacl.load_term_config filter-name term-name source_address=1.2.3.4 destination_address=5.6.7.8 action=accept test=True debug=True
    Output Example:
    .. code-block:: jinja
        edge01.bjm01:
            ----------
            already_configured:
                False
            comment:
                Configuration discarded.
            diff:
                [edit firewall]
                +    family inet {
                +        /*
                +         ** $Date: 2017/03/22 $
                +         **
                +         */
                +        filter filter-name {
                +            interface-specific;
                +            term term-name {
                +                from {
                +                    source-address {
                +                        1.2.3.4/32;
                +                    }
                +                    destination-address {
                +                        5.6.7.8/32;
                +                    }
                +                }
                +                then accept;
                +            }
                +        }
                +    }
            loaded_config:
                firewall {
                    family inet {
                        replace:
                        /*
                        ** $Date: 2017/03/22 $
                        **
                        */
                        filter filter-name {
                            interface-specific;
                            term term-name {
                                from {
                                    source-address {
                                        1.2.3.4/32;
                                    }
                                    destination-address {
                                        5.6.7.8/32;
                                    }
                                }
                                then accept;
                            }
                        }
                    }
                }
            result:
                True
    '''
    if not filter_options:
        filter_options = []
    # Resolve the capirca generator platform for the current device
    # (presumably derived from the minion's OS grain — see _get_capirca_platform).
    platform = _get_capirca_platform()
    # Delegate term-config generation to the capirca execution module ...
    term_config = __salt__['capirca.get_term_config'](platform,
                                                      filter_name,
                                                      term_name,
                                                      filter_options=filter_options,
                                                      pillar_key=pillar_key,
                                                      pillarenv=pillarenv,
                                                      saltenv=saltenv,
                                                      merge_pillar=merge_pillar,
                                                      revision_id=revision_id,
                                                      revision_no=revision_no,
                                                      revision_date=revision_date,
                                                      revision_date_format=revision_date_format,
                                                      source_service=source_service,
                                                      destination_service=destination_service,
                                                      **term_fields)
    # ... then push the generated text onto the device via net.load_config.
    # NOTE(review): napalm_device appears to be injected into this module's
    # globals by the NAPALM proxy machinery — confirm against the module header.
    return __salt__['net.load_config'](text=term_config,
                                       test=test,
                                       commit=commit,
                                       debug=debug,
                                       inherit_napalm_device=napalm_device)
"def",
"load_term_config",
"(",
"filter_name",
",",
"term_name",
",",
"filter_options",
"=",
"None",
",",
"pillar_key",
"=",
"'acl'",
",",
"pillarenv",
"=",
"None",
",",
"saltenv",
"=",
"None",
",",
"merge_pillar",
"=",
"True",
",",
"revision_id",
"=",
"None... | 32.785714 | 22.5 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.