repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
mbj4668/pyang | pyang/yang_parser.py | YangTokenizer.skip | def skip(self):
"""Skip whitespace and count position"""
buflen = len(self.buf)
while True:
self.buf = self.buf.lstrip()
if self.buf == '':
self.readline()
buflen = len(self.buf)
else:
self.offset += (buflen - len(self.buf))
break
# do not keep comments in the syntax tree
if not self.keep_comments:
# skip line comment
if self.buf[0] == '/':
if self.buf[1] == '/':
self.readline()
return self.skip()
# skip block comment
elif self.buf[1] == '*':
i = self.buf.find('*/')
while i == -1:
self.readline()
i = self.buf.find('*/')
self.set_buf(i+2)
return self.skip() | python | def skip(self):
"""Skip whitespace and count position"""
buflen = len(self.buf)
while True:
self.buf = self.buf.lstrip()
if self.buf == '':
self.readline()
buflen = len(self.buf)
else:
self.offset += (buflen - len(self.buf))
break
# do not keep comments in the syntax tree
if not self.keep_comments:
# skip line comment
if self.buf[0] == '/':
if self.buf[1] == '/':
self.readline()
return self.skip()
# skip block comment
elif self.buf[1] == '*':
i = self.buf.find('*/')
while i == -1:
self.readline()
i = self.buf.find('*/')
self.set_buf(i+2)
return self.skip() | [
"def",
"skip",
"(",
"self",
")",
":",
"buflen",
"=",
"len",
"(",
"self",
".",
"buf",
")",
"while",
"True",
":",
"self",
".",
"buf",
"=",
"self",
".",
"buf",
".",
"lstrip",
"(",
")",
"if",
"self",
".",
"buf",
"==",
"''",
":",
"self",
".",
"rea... | Skip whitespace and count position | [
"Skip",
"whitespace",
"and",
"count",
"position"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/yang_parser.py#L51-L78 | train | 226,000 |
mbj4668/pyang | pyang/yang_parser.py | YangParser.parse | def parse(self, ctx, ref, text):
"""Parse the string `text` containing a YANG statement.
Return a Statement on success or None on failure
"""
self.ctx = ctx
self.pos = error.Position(ref)
self.top = None
try:
self.tokenizer = YangTokenizer(text, self.pos, ctx.errors,
ctx.max_line_len, ctx.keep_comments,
not ctx.lax_quote_checks)
stmt = self._parse_statement(None)
except error.Abort:
return None
except error.Eof as e:
error.err_add(self.ctx.errors, self.pos, 'EOF_ERROR', ())
return None
try:
# we expect a error.Eof at this point, everything else is an error
self.tokenizer.peek()
except error.Eof:
return stmt
except:
pass
error.err_add(self.ctx.errors, self.pos, 'TRAILING_GARBAGE', ())
return None | python | def parse(self, ctx, ref, text):
"""Parse the string `text` containing a YANG statement.
Return a Statement on success or None on failure
"""
self.ctx = ctx
self.pos = error.Position(ref)
self.top = None
try:
self.tokenizer = YangTokenizer(text, self.pos, ctx.errors,
ctx.max_line_len, ctx.keep_comments,
not ctx.lax_quote_checks)
stmt = self._parse_statement(None)
except error.Abort:
return None
except error.Eof as e:
error.err_add(self.ctx.errors, self.pos, 'EOF_ERROR', ())
return None
try:
# we expect a error.Eof at this point, everything else is an error
self.tokenizer.peek()
except error.Eof:
return stmt
except:
pass
error.err_add(self.ctx.errors, self.pos, 'TRAILING_GARBAGE', ())
return None | [
"def",
"parse",
"(",
"self",
",",
"ctx",
",",
"ref",
",",
"text",
")",
":",
"self",
".",
"ctx",
"=",
"ctx",
"self",
".",
"pos",
"=",
"error",
".",
"Position",
"(",
"ref",
")",
"self",
".",
"top",
"=",
"None",
"try",
":",
"self",
".",
"tokenizer... | Parse the string `text` containing a YANG statement.
Return a Statement on success or None on failure | [
"Parse",
"the",
"string",
"text",
"containing",
"a",
"YANG",
"statement",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/yang_parser.py#L261-L288 | train | 226,001 |
mbj4668/pyang | pyang/statements.py | add_validation_phase | def add_validation_phase(phase, before=None, after=None):
"""Add a validation phase to the framework.
Can be used by plugins to do special validation of extensions."""
idx = 0
for x in _validation_phases:
if x == before:
_validation_phases.insert(idx, phase)
return
elif x == after:
_validation_phases.insert(idx+1, phase)
return
idx = idx + 1
# otherwise append at the end
_validation_phases.append(phase) | python | def add_validation_phase(phase, before=None, after=None):
"""Add a validation phase to the framework.
Can be used by plugins to do special validation of extensions."""
idx = 0
for x in _validation_phases:
if x == before:
_validation_phases.insert(idx, phase)
return
elif x == after:
_validation_phases.insert(idx+1, phase)
return
idx = idx + 1
# otherwise append at the end
_validation_phases.append(phase) | [
"def",
"add_validation_phase",
"(",
"phase",
",",
"before",
"=",
"None",
",",
"after",
"=",
"None",
")",
":",
"idx",
"=",
"0",
"for",
"x",
"in",
"_validation_phases",
":",
"if",
"x",
"==",
"before",
":",
"_validation_phases",
".",
"insert",
"(",
"idx",
... | Add a validation phase to the framework.
Can be used by plugins to do special validation of extensions. | [
"Add",
"a",
"validation",
"phase",
"to",
"the",
"framework",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L16-L30 | train | 226,002 |
mbj4668/pyang | pyang/statements.py | add_validation_fun | def add_validation_fun(phase, keywords, f):
"""Add a validation function to some phase in the framework.
Function `f` is called for each valid occurance of each keyword in
`keywords`.
Can be used by plugins to do special validation of extensions."""
for keyword in keywords:
if (phase, keyword) in _validation_map:
oldf = _validation_map[(phase, keyword)]
def newf(ctx, s):
oldf(ctx, s)
f(ctx, s)
_validation_map[(phase, keyword)] = newf
else:
_validation_map[(phase, keyword)] = f | python | def add_validation_fun(phase, keywords, f):
"""Add a validation function to some phase in the framework.
Function `f` is called for each valid occurance of each keyword in
`keywords`.
Can be used by plugins to do special validation of extensions."""
for keyword in keywords:
if (phase, keyword) in _validation_map:
oldf = _validation_map[(phase, keyword)]
def newf(ctx, s):
oldf(ctx, s)
f(ctx, s)
_validation_map[(phase, keyword)] = newf
else:
_validation_map[(phase, keyword)] = f | [
"def",
"add_validation_fun",
"(",
"phase",
",",
"keywords",
",",
"f",
")",
":",
"for",
"keyword",
"in",
"keywords",
":",
"if",
"(",
"phase",
",",
"keyword",
")",
"in",
"_validation_map",
":",
"oldf",
"=",
"_validation_map",
"[",
"(",
"phase",
",",
"keywo... | Add a validation function to some phase in the framework.
Function `f` is called for each valid occurance of each keyword in
`keywords`.
Can be used by plugins to do special validation of extensions. | [
"Add",
"a",
"validation",
"function",
"to",
"some",
"phase",
"in",
"the",
"framework",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L32-L46 | train | 226,003 |
mbj4668/pyang | pyang/statements.py | v_init_extension | def v_init_extension(ctx, stmt):
"""find the modulename of the prefix, and set `stmt.keyword`"""
(prefix, identifier) = stmt.raw_keyword
(modname, revision) = \
prefix_to_modulename_and_revision(stmt.i_module, prefix,
stmt.pos, ctx.errors)
stmt.keyword = (modname, identifier)
stmt.i_extension_modulename = modname
stmt.i_extension_revision = revision
stmt.i_extension = None | python | def v_init_extension(ctx, stmt):
"""find the modulename of the prefix, and set `stmt.keyword`"""
(prefix, identifier) = stmt.raw_keyword
(modname, revision) = \
prefix_to_modulename_and_revision(stmt.i_module, prefix,
stmt.pos, ctx.errors)
stmt.keyword = (modname, identifier)
stmt.i_extension_modulename = modname
stmt.i_extension_revision = revision
stmt.i_extension = None | [
"def",
"v_init_extension",
"(",
"ctx",
",",
"stmt",
")",
":",
"(",
"prefix",
",",
"identifier",
")",
"=",
"stmt",
".",
"raw_keyword",
"(",
"modname",
",",
"revision",
")",
"=",
"prefix_to_modulename_and_revision",
"(",
"stmt",
".",
"i_module",
",",
"prefix",... | find the modulename of the prefix, and set `stmt.keyword` | [
"find",
"the",
"modulename",
"of",
"the",
"prefix",
"and",
"set",
"stmt",
".",
"keyword"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L491-L500 | train | 226,004 |
mbj4668/pyang | pyang/statements.py | v_grammar_unique_defs | def v_grammar_unique_defs(ctx, stmt):
"""Verify that all typedefs and groupings are unique
Called for every statement.
Stores all typedefs in stmt.i_typedef, groupings in stmt.i_grouping
"""
defs = [('typedef', 'TYPE_ALREADY_DEFINED', stmt.i_typedefs),
('grouping', 'GROUPING_ALREADY_DEFINED', stmt.i_groupings)]
if stmt.parent is None:
defs.extend(
[('feature', 'FEATURE_ALREADY_DEFINED', stmt.i_features),
('identity', 'IDENTITY_ALREADY_DEFINED', stmt.i_identities),
('extension', 'EXTENSION_ALREADY_DEFINED', stmt.i_extensions)])
for (keyword, errcode, dict) in defs:
for definition in stmt.search(keyword):
if definition.arg in dict:
other = dict[definition.arg]
err_add(ctx.errors, definition.pos,
errcode, (definition.arg, other.pos))
else:
dict[definition.arg] = definition | python | def v_grammar_unique_defs(ctx, stmt):
"""Verify that all typedefs and groupings are unique
Called for every statement.
Stores all typedefs in stmt.i_typedef, groupings in stmt.i_grouping
"""
defs = [('typedef', 'TYPE_ALREADY_DEFINED', stmt.i_typedefs),
('grouping', 'GROUPING_ALREADY_DEFINED', stmt.i_groupings)]
if stmt.parent is None:
defs.extend(
[('feature', 'FEATURE_ALREADY_DEFINED', stmt.i_features),
('identity', 'IDENTITY_ALREADY_DEFINED', stmt.i_identities),
('extension', 'EXTENSION_ALREADY_DEFINED', stmt.i_extensions)])
for (keyword, errcode, dict) in defs:
for definition in stmt.search(keyword):
if definition.arg in dict:
other = dict[definition.arg]
err_add(ctx.errors, definition.pos,
errcode, (definition.arg, other.pos))
else:
dict[definition.arg] = definition | [
"def",
"v_grammar_unique_defs",
"(",
"ctx",
",",
"stmt",
")",
":",
"defs",
"=",
"[",
"(",
"'typedef'",
",",
"'TYPE_ALREADY_DEFINED'",
",",
"stmt",
".",
"i_typedefs",
")",
",",
"(",
"'grouping'",
",",
"'GROUPING_ALREADY_DEFINED'",
",",
"stmt",
".",
"i_groupings... | Verify that all typedefs and groupings are unique
Called for every statement.
Stores all typedefs in stmt.i_typedef, groupings in stmt.i_grouping | [
"Verify",
"that",
"all",
"typedefs",
"and",
"groupings",
"are",
"unique",
"Called",
"for",
"every",
"statement",
".",
"Stores",
"all",
"typedefs",
"in",
"stmt",
".",
"i_typedef",
"groupings",
"in",
"stmt",
".",
"i_grouping"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L537-L556 | train | 226,005 |
mbj4668/pyang | pyang/statements.py | v_type_extension | def v_type_extension(ctx, stmt):
"""verify that the extension matches the extension definition"""
(modulename, identifier) = stmt.keyword
revision = stmt.i_extension_revision
module = modulename_to_module(stmt.i_module, modulename, revision)
if module is None:
return
if identifier not in module.i_extensions:
if module.i_modulename == stmt.i_orig_module.i_modulename:
# extension defined in current submodule
if identifier not in stmt.i_orig_module.i_extensions:
err_add(ctx.errors, stmt.pos, 'EXTENSION_NOT_DEFINED',
(identifier, module.arg))
return
else:
stmt.i_extension = stmt.i_orig_module.i_extensions[identifier]
else:
err_add(ctx.errors, stmt.pos, 'EXTENSION_NOT_DEFINED',
(identifier, module.arg))
return
else:
stmt.i_extension = module.i_extensions[identifier]
ext_arg = stmt.i_extension.search_one('argument')
if stmt.arg is not None and ext_arg is None:
err_add(ctx.errors, stmt.pos, 'EXTENSION_ARGUMENT_PRESENT',
identifier)
elif stmt.arg is None and ext_arg is not None:
err_add(ctx.errors, stmt.pos, 'EXTENSION_NO_ARGUMENT_PRESENT',
identifier) | python | def v_type_extension(ctx, stmt):
"""verify that the extension matches the extension definition"""
(modulename, identifier) = stmt.keyword
revision = stmt.i_extension_revision
module = modulename_to_module(stmt.i_module, modulename, revision)
if module is None:
return
if identifier not in module.i_extensions:
if module.i_modulename == stmt.i_orig_module.i_modulename:
# extension defined in current submodule
if identifier not in stmt.i_orig_module.i_extensions:
err_add(ctx.errors, stmt.pos, 'EXTENSION_NOT_DEFINED',
(identifier, module.arg))
return
else:
stmt.i_extension = stmt.i_orig_module.i_extensions[identifier]
else:
err_add(ctx.errors, stmt.pos, 'EXTENSION_NOT_DEFINED',
(identifier, module.arg))
return
else:
stmt.i_extension = module.i_extensions[identifier]
ext_arg = stmt.i_extension.search_one('argument')
if stmt.arg is not None and ext_arg is None:
err_add(ctx.errors, stmt.pos, 'EXTENSION_ARGUMENT_PRESENT',
identifier)
elif stmt.arg is None and ext_arg is not None:
err_add(ctx.errors, stmt.pos, 'EXTENSION_NO_ARGUMENT_PRESENT',
identifier) | [
"def",
"v_type_extension",
"(",
"ctx",
",",
"stmt",
")",
":",
"(",
"modulename",
",",
"identifier",
")",
"=",
"stmt",
".",
"keyword",
"revision",
"=",
"stmt",
".",
"i_extension_revision",
"module",
"=",
"modulename_to_module",
"(",
"stmt",
".",
"i_module",
"... | verify that the extension matches the extension definition | [
"verify",
"that",
"the",
"extension",
"matches",
"the",
"extension",
"definition"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L1172-L1200 | train | 226,006 |
mbj4668/pyang | pyang/statements.py | v_type_if_feature | def v_type_if_feature(ctx, stmt, no_error_report=False):
"""verify that the referenced feature exists."""
stmt.i_feature = None
# Verify the argument type
expr = syntax.parse_if_feature_expr(stmt.arg)
if stmt.i_module.i_version == '1':
# version 1 allows only a single value as if-feature
if type(expr) != type(''):
err_add(ctx.errors, stmt.pos,
'BAD_VALUE', (stmt.arg, 'identifier-ref'))
return
def eval(expr):
if type(expr) == type(''):
return has_feature(expr)
else:
(op, op1, op2) = expr
if op == 'not':
return not eval(op1)
elif op == 'and':
return eval(op1) and eval(op2)
elif op == 'or':
return eval(op1) or eval(op2)
def has_feature(name):
# raises Abort if the feature is not defined
# returns True if we compile with the feature
# returns False if we compile without the feature
found = None
if name.find(":") == -1:
prefix = None
else:
[prefix, name] = name.split(':', 1)
if prefix is None or stmt.i_module.i_prefix == prefix:
# check local features
pmodule = stmt.i_module
else:
# this is a prefixed name, check the imported modules
pmodule = prefix_to_module(stmt.i_module, prefix,
stmt.pos, ctx.errors)
if pmodule is None:
raise Abort
if name in pmodule.i_features:
f = pmodule.i_features[name]
if prefix is None and not is_submodule_included(stmt, f):
pass
else:
found = pmodule.i_features[name]
v_type_feature(ctx, found)
if pmodule.i_modulename in ctx.features:
if name not in ctx.features[pmodule.i_modulename]:
return False
if found is None and no_error_report == False:
err_add(ctx.errors, stmt.pos,
'FEATURE_NOT_FOUND', (name, pmodule.arg))
raise Abort
return found is not None
# Evaluate the if-feature expression, and verify that all
# referenced features exist.
try:
if eval(expr) == False:
# prune the parent.
# since the parent can have more than one if-feature
# statement, we must check if the parent
# already has been scheduled for removal
if stmt.parent not in stmt.i_module.i_prune:
stmt.i_module.i_prune.append(stmt.parent)
except Abort:
pass | python | def v_type_if_feature(ctx, stmt, no_error_report=False):
"""verify that the referenced feature exists."""
stmt.i_feature = None
# Verify the argument type
expr = syntax.parse_if_feature_expr(stmt.arg)
if stmt.i_module.i_version == '1':
# version 1 allows only a single value as if-feature
if type(expr) != type(''):
err_add(ctx.errors, stmt.pos,
'BAD_VALUE', (stmt.arg, 'identifier-ref'))
return
def eval(expr):
if type(expr) == type(''):
return has_feature(expr)
else:
(op, op1, op2) = expr
if op == 'not':
return not eval(op1)
elif op == 'and':
return eval(op1) and eval(op2)
elif op == 'or':
return eval(op1) or eval(op2)
def has_feature(name):
# raises Abort if the feature is not defined
# returns True if we compile with the feature
# returns False if we compile without the feature
found = None
if name.find(":") == -1:
prefix = None
else:
[prefix, name] = name.split(':', 1)
if prefix is None or stmt.i_module.i_prefix == prefix:
# check local features
pmodule = stmt.i_module
else:
# this is a prefixed name, check the imported modules
pmodule = prefix_to_module(stmt.i_module, prefix,
stmt.pos, ctx.errors)
if pmodule is None:
raise Abort
if name in pmodule.i_features:
f = pmodule.i_features[name]
if prefix is None and not is_submodule_included(stmt, f):
pass
else:
found = pmodule.i_features[name]
v_type_feature(ctx, found)
if pmodule.i_modulename in ctx.features:
if name not in ctx.features[pmodule.i_modulename]:
return False
if found is None and no_error_report == False:
err_add(ctx.errors, stmt.pos,
'FEATURE_NOT_FOUND', (name, pmodule.arg))
raise Abort
return found is not None
# Evaluate the if-feature expression, and verify that all
# referenced features exist.
try:
if eval(expr) == False:
# prune the parent.
# since the parent can have more than one if-feature
# statement, we must check if the parent
# already has been scheduled for removal
if stmt.parent not in stmt.i_module.i_prune:
stmt.i_module.i_prune.append(stmt.parent)
except Abort:
pass | [
"def",
"v_type_if_feature",
"(",
"ctx",
",",
"stmt",
",",
"no_error_report",
"=",
"False",
")",
":",
"stmt",
".",
"i_feature",
"=",
"None",
"# Verify the argument type",
"expr",
"=",
"syntax",
".",
"parse_if_feature_expr",
"(",
"stmt",
".",
"arg",
")",
"if",
... | verify that the referenced feature exists. | [
"verify",
"that",
"the",
"referenced",
"feature",
"exists",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L1224-L1294 | train | 226,007 |
mbj4668/pyang | pyang/statements.py | v_type_base | def v_type_base(ctx, stmt, no_error_report=False):
"""verify that the referenced identity exists."""
# Find the identity
name = stmt.arg
stmt.i_identity = None
if name.find(":") == -1:
prefix = None
else:
[prefix, name] = name.split(':', 1)
if prefix is None or stmt.i_module.i_prefix == prefix:
# check local identities
pmodule = stmt.i_module
else:
# this is a prefixed name, check the imported modules
pmodule = prefix_to_module(stmt.i_module, prefix, stmt.pos, ctx.errors)
if pmodule is None:
return
if name in pmodule.i_identities:
i = pmodule.i_identities[name]
if prefix is None and not is_submodule_included(stmt, i):
pass
else:
stmt.i_identity = i
v_type_identity(ctx, stmt.i_identity)
if stmt.i_identity is None and no_error_report == False:
err_add(ctx.errors, stmt.pos,
'IDENTITY_NOT_FOUND', (name, pmodule.arg)) | python | def v_type_base(ctx, stmt, no_error_report=False):
"""verify that the referenced identity exists."""
# Find the identity
name = stmt.arg
stmt.i_identity = None
if name.find(":") == -1:
prefix = None
else:
[prefix, name] = name.split(':', 1)
if prefix is None or stmt.i_module.i_prefix == prefix:
# check local identities
pmodule = stmt.i_module
else:
# this is a prefixed name, check the imported modules
pmodule = prefix_to_module(stmt.i_module, prefix, stmt.pos, ctx.errors)
if pmodule is None:
return
if name in pmodule.i_identities:
i = pmodule.i_identities[name]
if prefix is None and not is_submodule_included(stmt, i):
pass
else:
stmt.i_identity = i
v_type_identity(ctx, stmt.i_identity)
if stmt.i_identity is None and no_error_report == False:
err_add(ctx.errors, stmt.pos,
'IDENTITY_NOT_FOUND', (name, pmodule.arg)) | [
"def",
"v_type_base",
"(",
"ctx",
",",
"stmt",
",",
"no_error_report",
"=",
"False",
")",
":",
"# Find the identity",
"name",
"=",
"stmt",
".",
"arg",
"stmt",
".",
"i_identity",
"=",
"None",
"if",
"name",
".",
"find",
"(",
"\":\"",
")",
"==",
"-",
"1",... | verify that the referenced identity exists. | [
"verify",
"that",
"the",
"referenced",
"identity",
"exists",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L1333-L1359 | train | 226,008 |
mbj4668/pyang | pyang/statements.py | v_unique_name_defintions | def v_unique_name_defintions(ctx, stmt):
"""Make sure that all top-level definitions in a module are unique"""
defs = [('typedef', 'TYPE_ALREADY_DEFINED', stmt.i_typedefs),
('grouping', 'GROUPING_ALREADY_DEFINED', stmt.i_groupings)]
def f(s):
for (keyword, errcode, dict) in defs:
if s.keyword == keyword and s.arg in dict:
err_add(ctx.errors, dict[s.arg].pos,
errcode, (s.arg, s.pos))
for i in stmt.search('include'):
submodulename = i.arg
subm = ctx.get_module(submodulename)
if subm is not None:
for s in subm.substmts:
for ss in s.substmts:
iterate_stmt(ss, f) | python | def v_unique_name_defintions(ctx, stmt):
"""Make sure that all top-level definitions in a module are unique"""
defs = [('typedef', 'TYPE_ALREADY_DEFINED', stmt.i_typedefs),
('grouping', 'GROUPING_ALREADY_DEFINED', stmt.i_groupings)]
def f(s):
for (keyword, errcode, dict) in defs:
if s.keyword == keyword and s.arg in dict:
err_add(ctx.errors, dict[s.arg].pos,
errcode, (s.arg, s.pos))
for i in stmt.search('include'):
submodulename = i.arg
subm = ctx.get_module(submodulename)
if subm is not None:
for s in subm.substmts:
for ss in s.substmts:
iterate_stmt(ss, f) | [
"def",
"v_unique_name_defintions",
"(",
"ctx",
",",
"stmt",
")",
":",
"defs",
"=",
"[",
"(",
"'typedef'",
",",
"'TYPE_ALREADY_DEFINED'",
",",
"stmt",
".",
"i_typedefs",
")",
",",
"(",
"'grouping'",
",",
"'GROUPING_ALREADY_DEFINED'",
",",
"stmt",
".",
"i_groupi... | Make sure that all top-level definitions in a module are unique | [
"Make",
"sure",
"that",
"all",
"top",
"-",
"level",
"definitions",
"in",
"a",
"module",
"are",
"unique"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L1802-L1818 | train | 226,009 |
mbj4668/pyang | pyang/statements.py | v_unique_name_children | def v_unique_name_children(ctx, stmt):
"""Make sure that each child of stmt has a unique name"""
def sort_pos(p1, p2):
if p1.line < p2.line:
return (p1,p2)
else:
return (p2,p1)
dict = {}
chs = stmt.i_children
def check(c):
key = (c.i_module.i_modulename, c.arg)
if key in dict:
dup = dict[key]
(minpos, maxpos) = sort_pos(c.pos, dup.pos)
pos = chk_uses_pos(c, maxpos)
err_add(ctx.errors, pos,
'DUPLICATE_CHILD_NAME', (stmt.arg, stmt.pos, c.arg, minpos))
else:
dict[key] = c
# also check all data nodes in the cases
if c.keyword == 'choice':
for case in c.i_children:
for cc in case.i_children:
check(cc)
for c in chs:
check(c) | python | def v_unique_name_children(ctx, stmt):
"""Make sure that each child of stmt has a unique name"""
def sort_pos(p1, p2):
if p1.line < p2.line:
return (p1,p2)
else:
return (p2,p1)
dict = {}
chs = stmt.i_children
def check(c):
key = (c.i_module.i_modulename, c.arg)
if key in dict:
dup = dict[key]
(minpos, maxpos) = sort_pos(c.pos, dup.pos)
pos = chk_uses_pos(c, maxpos)
err_add(ctx.errors, pos,
'DUPLICATE_CHILD_NAME', (stmt.arg, stmt.pos, c.arg, minpos))
else:
dict[key] = c
# also check all data nodes in the cases
if c.keyword == 'choice':
for case in c.i_children:
for cc in case.i_children:
check(cc)
for c in chs:
check(c) | [
"def",
"v_unique_name_children",
"(",
"ctx",
",",
"stmt",
")",
":",
"def",
"sort_pos",
"(",
"p1",
",",
"p2",
")",
":",
"if",
"p1",
".",
"line",
"<",
"p2",
".",
"line",
":",
"return",
"(",
"p1",
",",
"p2",
")",
"else",
":",
"return",
"(",
"p2",
... | Make sure that each child of stmt has a unique name | [
"Make",
"sure",
"that",
"each",
"child",
"of",
"stmt",
"has",
"a",
"unique",
"name"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L1821-L1850 | train | 226,010 |
mbj4668/pyang | pyang/statements.py | v_unique_name_leaf_list | def v_unique_name_leaf_list(ctx, stmt):
"""Make sure config true leaf-lists do nothave duplicate defaults"""
if not stmt.i_config:
return
seen = []
for defval in stmt.i_default:
if defval in seen:
err_add(ctx.errors, stmt.pos, 'DUPLICATE_DEFAULT', (defval))
else:
seen.append(defval) | python | def v_unique_name_leaf_list(ctx, stmt):
"""Make sure config true leaf-lists do nothave duplicate defaults"""
if not stmt.i_config:
return
seen = []
for defval in stmt.i_default:
if defval in seen:
err_add(ctx.errors, stmt.pos, 'DUPLICATE_DEFAULT', (defval))
else:
seen.append(defval) | [
"def",
"v_unique_name_leaf_list",
"(",
"ctx",
",",
"stmt",
")",
":",
"if",
"not",
"stmt",
".",
"i_config",
":",
"return",
"seen",
"=",
"[",
"]",
"for",
"defval",
"in",
"stmt",
".",
"i_default",
":",
"if",
"defval",
"in",
"seen",
":",
"err_add",
"(",
... | Make sure config true leaf-lists do nothave duplicate defaults | [
"Make",
"sure",
"config",
"true",
"leaf",
"-",
"lists",
"do",
"nothave",
"duplicate",
"defaults"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L1852-L1862 | train | 226,011 |
mbj4668/pyang | pyang/statements.py | v_reference_choice | def v_reference_choice(ctx, stmt):
"""Make sure that the default case exists"""
d = stmt.search_one('default')
if d is not None:
m = stmt.search_one('mandatory')
if m is not None and m.arg == 'true':
err_add(ctx.errors, stmt.pos, 'DEFAULT_AND_MANDATORY', ())
ptr = attrsearch(d.arg, 'arg', stmt.i_children)
if ptr is None:
err_add(ctx.errors, d.pos, 'DEFAULT_CASE_NOT_FOUND', d.arg)
else:
# make sure there are no mandatory nodes in the default case
def chk_no_defaults(s):
for c in s.i_children:
if c.keyword in ('leaf', 'choice'):
m = c.search_one('mandatory')
if m is not None and m.arg == 'true':
err_add(ctx.errors, c.pos,
'MANDATORY_NODE_IN_DEFAULT_CASE', ())
elif c.keyword in ('list', 'leaf-list'):
m = c.search_one('min-elements')
if m is not None and int(m.arg) > 0:
err_add(ctx.errors, c.pos,
'MANDATORY_NODE_IN_DEFAULT_CASE', ())
elif c.keyword == 'container':
p = c.search_one('presence')
if p == None or p.arg == 'false':
chk_no_defaults(c)
chk_no_defaults(ptr) | python | def v_reference_choice(ctx, stmt):
"""Make sure that the default case exists"""
d = stmt.search_one('default')
if d is not None:
m = stmt.search_one('mandatory')
if m is not None and m.arg == 'true':
err_add(ctx.errors, stmt.pos, 'DEFAULT_AND_MANDATORY', ())
ptr = attrsearch(d.arg, 'arg', stmt.i_children)
if ptr is None:
err_add(ctx.errors, d.pos, 'DEFAULT_CASE_NOT_FOUND', d.arg)
else:
# make sure there are no mandatory nodes in the default case
def chk_no_defaults(s):
for c in s.i_children:
if c.keyword in ('leaf', 'choice'):
m = c.search_one('mandatory')
if m is not None and m.arg == 'true':
err_add(ctx.errors, c.pos,
'MANDATORY_NODE_IN_DEFAULT_CASE', ())
elif c.keyword in ('list', 'leaf-list'):
m = c.search_one('min-elements')
if m is not None and int(m.arg) > 0:
err_add(ctx.errors, c.pos,
'MANDATORY_NODE_IN_DEFAULT_CASE', ())
elif c.keyword == 'container':
p = c.search_one('presence')
if p == None or p.arg == 'false':
chk_no_defaults(c)
chk_no_defaults(ptr) | [
"def",
"v_reference_choice",
"(",
"ctx",
",",
"stmt",
")",
":",
"d",
"=",
"stmt",
".",
"search_one",
"(",
"'default'",
")",
"if",
"d",
"is",
"not",
"None",
":",
"m",
"=",
"stmt",
".",
"search_one",
"(",
"'mandatory'",
")",
"if",
"m",
"is",
"not",
"... | Make sure that the default case exists | [
"Make",
"sure",
"that",
"the",
"default",
"case",
"exists"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L1993-L2021 | train | 226,012 |
mbj4668/pyang | pyang/statements.py | v_reference_leaf_leafref | def v_reference_leaf_leafref(ctx, stmt):
"""Verify that all leafrefs in a leaf or leaf-list have correct path"""
if (hasattr(stmt, 'i_leafref') and
stmt.i_leafref is not None and
stmt.i_leafref_expanded is False):
path_type_spec = stmt.i_leafref
not_req_inst = not(path_type_spec.require_instance)
x = validate_leafref_path(ctx, stmt,
path_type_spec.path_spec,
path_type_spec.path_,
accept_non_config_target=not_req_inst
)
if x is None:
return
ptr, expanded_path, path_list = x
path_type_spec.i_target_node = ptr
path_type_spec.i_expanded_path = expanded_path
path_type_spec.i_path_list = path_list
stmt.i_leafref_expanded = True
if ptr is not None:
chk_status(ctx, stmt, ptr)
stmt.i_leafref_ptr = (ptr, path_type_spec.pos) | python | def v_reference_leaf_leafref(ctx, stmt):
"""Verify that all leafrefs in a leaf or leaf-list have correct path"""
if (hasattr(stmt, 'i_leafref') and
stmt.i_leafref is not None and
stmt.i_leafref_expanded is False):
path_type_spec = stmt.i_leafref
not_req_inst = not(path_type_spec.require_instance)
x = validate_leafref_path(ctx, stmt,
path_type_spec.path_spec,
path_type_spec.path_,
accept_non_config_target=not_req_inst
)
if x is None:
return
ptr, expanded_path, path_list = x
path_type_spec.i_target_node = ptr
path_type_spec.i_expanded_path = expanded_path
path_type_spec.i_path_list = path_list
stmt.i_leafref_expanded = True
if ptr is not None:
chk_status(ctx, stmt, ptr)
stmt.i_leafref_ptr = (ptr, path_type_spec.pos) | [
"def",
"v_reference_leaf_leafref",
"(",
"ctx",
",",
"stmt",
")",
":",
"if",
"(",
"hasattr",
"(",
"stmt",
",",
"'i_leafref'",
")",
"and",
"stmt",
".",
"i_leafref",
"is",
"not",
"None",
"and",
"stmt",
".",
"i_leafref_expanded",
"is",
"False",
")",
":",
"pa... | Verify that all leafrefs in a leaf or leaf-list have correct path | [
"Verify",
"that",
"all",
"leafrefs",
"in",
"a",
"leaf",
"or",
"leaf",
"-",
"list",
"have",
"correct",
"path"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L2023-L2045 | train | 226,013 |
mbj4668/pyang | pyang/statements.py | has_type | def has_type(type, names):
"""Return type with name if `type` has name as one of its base types,
and name is in the `names` list. otherwise, return None."""
if type.arg in names:
return type
for t in type.search('type'): # check all union's member types
r = has_type(t, names)
if r is not None:
return r
if not hasattr(type, 'i_typedef'):
return None
if (type.i_typedef is not None and
hasattr(type.i_typedef, 'i_is_circular') and
type.i_typedef.i_is_circular == False):
t = type.i_typedef.search_one('type')
if t is not None:
return has_type(t, names)
return None | python | def has_type(type, names):
"""Return type with name if `type` has name as one of its base types,
and name is in the `names` list. otherwise, return None."""
if type.arg in names:
return type
for t in type.search('type'): # check all union's member types
r = has_type(t, names)
if r is not None:
return r
if not hasattr(type, 'i_typedef'):
return None
if (type.i_typedef is not None and
hasattr(type.i_typedef, 'i_is_circular') and
type.i_typedef.i_is_circular == False):
t = type.i_typedef.search_one('type')
if t is not None:
return has_type(t, names)
return None | [
"def",
"has_type",
"(",
"type",
",",
"names",
")",
":",
"if",
"type",
".",
"arg",
"in",
"names",
":",
"return",
"type",
"for",
"t",
"in",
"type",
".",
"search",
"(",
"'type'",
")",
":",
"# check all union's member types",
"r",
"=",
"has_type",
"(",
"t"... | Return type with name if `type` has name as one of its base types,
and name is in the `names` list. otherwise, return None. | [
"Return",
"type",
"with",
"name",
"if",
"type",
"has",
"name",
"as",
"one",
"of",
"its",
"base",
"types",
"and",
"name",
"is",
"in",
"the",
"names",
"list",
".",
"otherwise",
"return",
"None",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L2268-L2285 | train | 226,014 |
mbj4668/pyang | pyang/statements.py | search_typedef | def search_typedef(stmt, name):
"""Search for a typedef in scope
First search the hierarchy, then the module and its submodules."""
mod = stmt.i_orig_module
while stmt is not None:
if name in stmt.i_typedefs:
t = stmt.i_typedefs[name]
if (mod is not None and
mod != t.i_orig_module and
t.i_orig_module.keyword == 'submodule'):
# make sure this submodule is included
if mod.search_one('include', t.i_orig_module.arg) is None:
return None
return t
stmt = stmt.parent
return None | python | def search_typedef(stmt, name):
"""Search for a typedef in scope
First search the hierarchy, then the module and its submodules."""
mod = stmt.i_orig_module
while stmt is not None:
if name in stmt.i_typedefs:
t = stmt.i_typedefs[name]
if (mod is not None and
mod != t.i_orig_module and
t.i_orig_module.keyword == 'submodule'):
# make sure this submodule is included
if mod.search_one('include', t.i_orig_module.arg) is None:
return None
return t
stmt = stmt.parent
return None | [
"def",
"search_typedef",
"(",
"stmt",
",",
"name",
")",
":",
"mod",
"=",
"stmt",
".",
"i_orig_module",
"while",
"stmt",
"is",
"not",
"None",
":",
"if",
"name",
"in",
"stmt",
".",
"i_typedefs",
":",
"t",
"=",
"stmt",
".",
"i_typedefs",
"[",
"name",
"]... | Search for a typedef in scope
First search the hierarchy, then the module and its submodules. | [
"Search",
"for",
"a",
"typedef",
"in",
"scope",
"First",
"search",
"the",
"hierarchy",
"then",
"the",
"module",
"and",
"its",
"submodules",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L2318-L2333 | train | 226,015 |
mbj4668/pyang | pyang/statements.py | search_grouping | def search_grouping(stmt, name):
"""Search for a grouping in scope
First search the hierarchy, then the module and its submodules."""
mod = stmt.i_orig_module
while stmt is not None:
if name in stmt.i_groupings:
g = stmt.i_groupings[name]
if (mod is not None and
mod != g.i_orig_module and
g.i_orig_module.keyword == 'submodule'):
# make sure this submodule is included
if mod.search_one('include', g.i_orig_module.arg) is None:
return None
return g
stmt = stmt.parent
return None | python | def search_grouping(stmt, name):
"""Search for a grouping in scope
First search the hierarchy, then the module and its submodules."""
mod = stmt.i_orig_module
while stmt is not None:
if name in stmt.i_groupings:
g = stmt.i_groupings[name]
if (mod is not None and
mod != g.i_orig_module and
g.i_orig_module.keyword == 'submodule'):
# make sure this submodule is included
if mod.search_one('include', g.i_orig_module.arg) is None:
return None
return g
stmt = stmt.parent
return None | [
"def",
"search_grouping",
"(",
"stmt",
",",
"name",
")",
":",
"mod",
"=",
"stmt",
".",
"i_orig_module",
"while",
"stmt",
"is",
"not",
"None",
":",
"if",
"name",
"in",
"stmt",
".",
"i_groupings",
":",
"g",
"=",
"stmt",
".",
"i_groupings",
"[",
"name",
... | Search for a grouping in scope
First search the hierarchy, then the module and its submodules. | [
"Search",
"for",
"a",
"grouping",
"in",
"scope",
"First",
"search",
"the",
"hierarchy",
"then",
"the",
"module",
"and",
"its",
"submodules",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L2335-L2350 | train | 226,016 |
mbj4668/pyang | pyang/statements.py | is_submodule_included | def is_submodule_included(src, tgt):
"""Check that the tgt's submodule is included by src, if they belong
to the same module."""
if tgt is None or not hasattr(tgt, 'i_orig_module'):
return True
if (tgt.i_orig_module.keyword == 'submodule' and
src.i_orig_module != tgt.i_orig_module and
src.i_orig_module.i_modulename == tgt.i_orig_module.i_modulename):
if src.i_orig_module.search_one('include',
tgt.i_orig_module.arg) is None:
return False
return True | python | def is_submodule_included(src, tgt):
"""Check that the tgt's submodule is included by src, if they belong
to the same module."""
if tgt is None or not hasattr(tgt, 'i_orig_module'):
return True
if (tgt.i_orig_module.keyword == 'submodule' and
src.i_orig_module != tgt.i_orig_module and
src.i_orig_module.i_modulename == tgt.i_orig_module.i_modulename):
if src.i_orig_module.search_one('include',
tgt.i_orig_module.arg) is None:
return False
return True | [
"def",
"is_submodule_included",
"(",
"src",
",",
"tgt",
")",
":",
"if",
"tgt",
"is",
"None",
"or",
"not",
"hasattr",
"(",
"tgt",
",",
"'i_orig_module'",
")",
":",
"return",
"True",
"if",
"(",
"tgt",
".",
"i_orig_module",
".",
"keyword",
"==",
"'submodule... | Check that the tgt's submodule is included by src, if they belong
to the same module. | [
"Check",
"that",
"the",
"tgt",
"s",
"submodule",
"is",
"included",
"by",
"src",
"if",
"they",
"belong",
"to",
"the",
"same",
"module",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L2466-L2477 | train | 226,017 |
mbj4668/pyang | pyang/statements.py | mk_path_str | def mk_path_str(stmt,
with_prefixes=False,
prefix_onchange=False,
prefix_to_module=False,
resolve_top_prefix_to_module=False):
"""Returns the XPath path of the node.
with_prefixes indicates whether or not to prefix every node.
prefix_onchange modifies the behavior of with_prefixes and
only adds prefixes when the prefix changes mid-XPath.
prefix_to_module replaces prefixes with the module name of the prefix.
resolve_top_prefix_to_module resolves the module-level prefix
to the module name.
Prefixes may be included in the path if the prefix changes mid-path.
"""
resolved_names = mk_path_list(stmt)
xpath_elements = []
last_prefix = None
for index, resolved_name in enumerate(resolved_names):
module_name, prefix, node_name = resolved_name
xpath_element = node_name
if with_prefixes or (prefix_onchange and prefix != last_prefix):
new_prefix = prefix
if (prefix_to_module or
(index == 0 and resolve_top_prefix_to_module)):
new_prefix = module_name
xpath_element = '%s:%s' % (new_prefix, node_name)
xpath_elements.append(xpath_element)
last_prefix = prefix
return '/%s' % '/'.join(xpath_elements) | python | def mk_path_str(stmt,
with_prefixes=False,
prefix_onchange=False,
prefix_to_module=False,
resolve_top_prefix_to_module=False):
"""Returns the XPath path of the node.
with_prefixes indicates whether or not to prefix every node.
prefix_onchange modifies the behavior of with_prefixes and
only adds prefixes when the prefix changes mid-XPath.
prefix_to_module replaces prefixes with the module name of the prefix.
resolve_top_prefix_to_module resolves the module-level prefix
to the module name.
Prefixes may be included in the path if the prefix changes mid-path.
"""
resolved_names = mk_path_list(stmt)
xpath_elements = []
last_prefix = None
for index, resolved_name in enumerate(resolved_names):
module_name, prefix, node_name = resolved_name
xpath_element = node_name
if with_prefixes or (prefix_onchange and prefix != last_prefix):
new_prefix = prefix
if (prefix_to_module or
(index == 0 and resolve_top_prefix_to_module)):
new_prefix = module_name
xpath_element = '%s:%s' % (new_prefix, node_name)
xpath_elements.append(xpath_element)
last_prefix = prefix
return '/%s' % '/'.join(xpath_elements) | [
"def",
"mk_path_str",
"(",
"stmt",
",",
"with_prefixes",
"=",
"False",
",",
"prefix_onchange",
"=",
"False",
",",
"prefix_to_module",
"=",
"False",
",",
"resolve_top_prefix_to_module",
"=",
"False",
")",
":",
"resolved_names",
"=",
"mk_path_list",
"(",
"stmt",
"... | Returns the XPath path of the node.
with_prefixes indicates whether or not to prefix every node.
prefix_onchange modifies the behavior of with_prefixes and
only adds prefixes when the prefix changes mid-XPath.
prefix_to_module replaces prefixes with the module name of the prefix.
resolve_top_prefix_to_module resolves the module-level prefix
to the module name.
Prefixes may be included in the path if the prefix changes mid-path. | [
"Returns",
"the",
"XPath",
"path",
"of",
"the",
"node",
".",
"with_prefixes",
"indicates",
"whether",
"or",
"not",
"to",
"prefix",
"every",
"node",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L3107-L3139 | train | 226,018 |
mbj4668/pyang | pyang/statements.py | get_xpath | def get_xpath(stmt, qualified=False, prefix_to_module=False):
"""Gets the XPath of the statement.
Unless qualified=True, does not include prefixes unless the prefix
changes mid-XPath.
qualified will add a prefix to each node.
prefix_to_module will resolve prefixes to module names instead.
For RFC 8040, set prefix_to_module=True:
/prefix:root/node/prefix:node/...
qualified=True:
/prefix:root/prefix:node/prefix:node/...
qualified=True, prefix_to_module=True:
/module:root/module:node/module:node/...
prefix_to_module=True: /module:root/node/module:node/...
"""
return mk_path_str(stmt, with_prefixes=qualified,
prefix_onchange=True, prefix_to_module=prefix_to_module) | python | def get_xpath(stmt, qualified=False, prefix_to_module=False):
"""Gets the XPath of the statement.
Unless qualified=True, does not include prefixes unless the prefix
changes mid-XPath.
qualified will add a prefix to each node.
prefix_to_module will resolve prefixes to module names instead.
For RFC 8040, set prefix_to_module=True:
/prefix:root/node/prefix:node/...
qualified=True:
/prefix:root/prefix:node/prefix:node/...
qualified=True, prefix_to_module=True:
/module:root/module:node/module:node/...
prefix_to_module=True: /module:root/node/module:node/...
"""
return mk_path_str(stmt, with_prefixes=qualified,
prefix_onchange=True, prefix_to_module=prefix_to_module) | [
"def",
"get_xpath",
"(",
"stmt",
",",
"qualified",
"=",
"False",
",",
"prefix_to_module",
"=",
"False",
")",
":",
"return",
"mk_path_str",
"(",
"stmt",
",",
"with_prefixes",
"=",
"qualified",
",",
"prefix_onchange",
"=",
"True",
",",
"prefix_to_module",
"=",
... | Gets the XPath of the statement.
Unless qualified=True, does not include prefixes unless the prefix
changes mid-XPath.
qualified will add a prefix to each node.
prefix_to_module will resolve prefixes to module names instead.
For RFC 8040, set prefix_to_module=True:
/prefix:root/node/prefix:node/...
qualified=True:
/prefix:root/prefix:node/prefix:node/...
qualified=True, prefix_to_module=True:
/module:root/module:node/module:node/...
prefix_to_module=True: /module:root/node/module:node/... | [
"Gets",
"the",
"XPath",
"of",
"the",
"statement",
".",
"Unless",
"qualified",
"=",
"True",
"does",
"not",
"include",
"prefixes",
"unless",
"the",
"prefix",
"changes",
"mid",
"-",
"XPath",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L3141-L3161 | train | 226,019 |
mbj4668/pyang | pyang/statements.py | get_qualified_type | def get_qualified_type(stmt):
"""Gets the qualified, top-level type of the node.
This enters the typedef if defined instead of using the prefix
to ensure absolute distinction.
"""
type_obj = stmt.search_one('type')
fq_type_name = None
if type_obj:
if getattr(type_obj, 'i_typedef', None):
# If type_obj has typedef, substitute.
# Absolute module:type instead of prefix:type
type_obj = type_obj.i_typedef
type_name = type_obj.arg
if check_primitive_type(type_obj):
# Doesn't make sense to qualify a primitive..I think.
fq_type_name = type_name
else:
type_module = type_obj.i_orig_module.arg
fq_type_name = '%s:%s' % (type_module, type_name)
return fq_type_name | python | def get_qualified_type(stmt):
"""Gets the qualified, top-level type of the node.
This enters the typedef if defined instead of using the prefix
to ensure absolute distinction.
"""
type_obj = stmt.search_one('type')
fq_type_name = None
if type_obj:
if getattr(type_obj, 'i_typedef', None):
# If type_obj has typedef, substitute.
# Absolute module:type instead of prefix:type
type_obj = type_obj.i_typedef
type_name = type_obj.arg
if check_primitive_type(type_obj):
# Doesn't make sense to qualify a primitive..I think.
fq_type_name = type_name
else:
type_module = type_obj.i_orig_module.arg
fq_type_name = '%s:%s' % (type_module, type_name)
return fq_type_name | [
"def",
"get_qualified_type",
"(",
"stmt",
")",
":",
"type_obj",
"=",
"stmt",
".",
"search_one",
"(",
"'type'",
")",
"fq_type_name",
"=",
"None",
"if",
"type_obj",
":",
"if",
"getattr",
"(",
"type_obj",
",",
"'i_typedef'",
",",
"None",
")",
":",
"# If type_... | Gets the qualified, top-level type of the node.
This enters the typedef if defined instead of using the prefix
to ensure absolute distinction. | [
"Gets",
"the",
"qualified",
"top",
"-",
"level",
"type",
"of",
"the",
"node",
".",
"This",
"enters",
"the",
"typedef",
"if",
"defined",
"instead",
"of",
"using",
"the",
"prefix",
"to",
"ensure",
"absolute",
"distinction",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L3171-L3190 | train | 226,020 |
mbj4668/pyang | pyang/statements.py | get_primitive_type | def get_primitive_type(stmt):
"""Recurses through the typedefs and returns
the most primitive YANG type defined.
"""
type_obj = stmt.search_one('type')
type_name = getattr(type_obj, 'arg', None)
typedef_obj = getattr(type_obj, 'i_typedef', None)
if typedef_obj:
type_name = get_primitive_type(typedef_obj)
elif type_obj and not check_primitive_type(type_obj):
raise Exception('%s is not a primitive! Incomplete parse tree?' %
type_name)
return type_name | python | def get_primitive_type(stmt):
"""Recurses through the typedefs and returns
the most primitive YANG type defined.
"""
type_obj = stmt.search_one('type')
type_name = getattr(type_obj, 'arg', None)
typedef_obj = getattr(type_obj, 'i_typedef', None)
if typedef_obj:
type_name = get_primitive_type(typedef_obj)
elif type_obj and not check_primitive_type(type_obj):
raise Exception('%s is not a primitive! Incomplete parse tree?' %
type_name)
return type_name | [
"def",
"get_primitive_type",
"(",
"stmt",
")",
":",
"type_obj",
"=",
"stmt",
".",
"search_one",
"(",
"'type'",
")",
"type_name",
"=",
"getattr",
"(",
"type_obj",
",",
"'arg'",
",",
"None",
")",
"typedef_obj",
"=",
"getattr",
"(",
"type_obj",
",",
"'i_typed... | Recurses through the typedefs and returns
the most primitive YANG type defined. | [
"Recurses",
"through",
"the",
"typedefs",
"and",
"returns",
"the",
"most",
"primitive",
"YANG",
"type",
"defined",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L3192-L3204 | train | 226,021 |
mbj4668/pyang | pyang/statements.py | Statement.search | def search(self, keyword, children=None, arg=None):
"""Return list of receiver's substmts with `keyword`.
"""
if children is None:
children = self.substmts
return [ ch for ch in children
if (ch.keyword == keyword and
(arg is None or ch.arg == arg))] | python | def search(self, keyword, children=None, arg=None):
"""Return list of receiver's substmts with `keyword`.
"""
if children is None:
children = self.substmts
return [ ch for ch in children
if (ch.keyword == keyword and
(arg is None or ch.arg == arg))] | [
"def",
"search",
"(",
"self",
",",
"keyword",
",",
"children",
"=",
"None",
",",
"arg",
"=",
"None",
")",
":",
"if",
"children",
"is",
"None",
":",
"children",
"=",
"self",
".",
"substmts",
"return",
"[",
"ch",
"for",
"ch",
"in",
"children",
"if",
... | Return list of receiver's substmts with `keyword`. | [
"Return",
"list",
"of",
"receiver",
"s",
"substmts",
"with",
"keyword",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L2819-L2826 | train | 226,022 |
mbj4668/pyang | pyang/statements.py | Statement.search_one | def search_one(self, keyword, arg=None, children=None):
"""Return receiver's substmt with `keyword` and optionally `arg`.
"""
if children is None:
children = self.substmts
for ch in children:
if ch.keyword == keyword and (arg is None or ch.arg == arg):
return ch
return None | python | def search_one(self, keyword, arg=None, children=None):
"""Return receiver's substmt with `keyword` and optionally `arg`.
"""
if children is None:
children = self.substmts
for ch in children:
if ch.keyword == keyword and (arg is None or ch.arg == arg):
return ch
return None | [
"def",
"search_one",
"(",
"self",
",",
"keyword",
",",
"arg",
"=",
"None",
",",
"children",
"=",
"None",
")",
":",
"if",
"children",
"is",
"None",
":",
"children",
"=",
"self",
".",
"substmts",
"for",
"ch",
"in",
"children",
":",
"if",
"ch",
".",
"... | Return receiver's substmt with `keyword` and optionally `arg`. | [
"Return",
"receiver",
"s",
"substmt",
"with",
"keyword",
"and",
"optionally",
"arg",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L2828-L2836 | train | 226,023 |
mbj4668/pyang | pyang/statements.py | Statement.main_module | def main_module(self):
"""Return the main module to which the receiver belongs."""
if self.i_module.keyword == "submodule":
return self.i_module.i_ctx.get_module(
self.i_module.i_including_modulename)
return self.i_module | python | def main_module(self):
"""Return the main module to which the receiver belongs."""
if self.i_module.keyword == "submodule":
return self.i_module.i_ctx.get_module(
self.i_module.i_including_modulename)
return self.i_module | [
"def",
"main_module",
"(",
"self",
")",
":",
"if",
"self",
".",
"i_module",
".",
"keyword",
"==",
"\"submodule\"",
":",
"return",
"self",
".",
"i_module",
".",
"i_ctx",
".",
"get_module",
"(",
"self",
".",
"i_module",
".",
"i_including_modulename",
")",
"r... | Return the main module to which the receiver belongs. | [
"Return",
"the",
"main",
"module",
"to",
"which",
"the",
"receiver",
"belongs",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L2866-L2871 | train | 226,024 |
mbj4668/pyang | pyang/xpath.py | add_prefix | def add_prefix(prefix, s):
"Add `prefix` to all unprefixed names in `s`"
# tokenize the XPath expression
toks = xpath_lexer.scan(s)
# add default prefix to unprefixed names
toks2 = [_add_prefix(prefix, tok) for tok in toks]
# build a string of the patched expression
ls = [x.value for x in toks2]
return ''.join(ls) | python | def add_prefix(prefix, s):
"Add `prefix` to all unprefixed names in `s`"
# tokenize the XPath expression
toks = xpath_lexer.scan(s)
# add default prefix to unprefixed names
toks2 = [_add_prefix(prefix, tok) for tok in toks]
# build a string of the patched expression
ls = [x.value for x in toks2]
return ''.join(ls) | [
"def",
"add_prefix",
"(",
"prefix",
",",
"s",
")",
":",
"# tokenize the XPath expression",
"toks",
"=",
"xpath_lexer",
".",
"scan",
"(",
"s",
")",
"# add default prefix to unprefixed names",
"toks2",
"=",
"[",
"_add_prefix",
"(",
"prefix",
",",
"tok",
")",
"for"... | Add `prefix` to all unprefixed names in `s` | [
"Add",
"prefix",
"to",
"all",
"unprefixed",
"names",
"in",
"s"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/xpath.py#L56-L64 | train | 226,025 |
mbj4668/pyang | pyang/syntax.py | chk_date_arg | def chk_date_arg(s):
"""Checks if the string `s` is a valid date string.
Return True of False."""
if re_date.search(s) is None:
return False
comp = s.split('-')
try:
dt = datetime.date(int(comp[0]), int(comp[1]), int(comp[2]))
return True
except Exception as e:
return False | python | def chk_date_arg(s):
"""Checks if the string `s` is a valid date string.
Return True of False."""
if re_date.search(s) is None:
return False
comp = s.split('-')
try:
dt = datetime.date(int(comp[0]), int(comp[1]), int(comp[2]))
return True
except Exception as e:
return False | [
"def",
"chk_date_arg",
"(",
"s",
")",
":",
"if",
"re_date",
".",
"search",
"(",
"s",
")",
"is",
"None",
":",
"return",
"False",
"comp",
"=",
"s",
".",
"split",
"(",
"'-'",
")",
"try",
":",
"dt",
"=",
"datetime",
".",
"date",
"(",
"int",
"(",
"c... | Checks if the string `s` is a valid date string.
Return True of False. | [
"Checks",
"if",
"the",
"string",
"s",
"is",
"a",
"valid",
"date",
"string",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/syntax.py#L173-L184 | train | 226,026 |
mbj4668/pyang | pyang/syntax.py | chk_enum_arg | def chk_enum_arg(s):
"""Checks if the string `s` is a valid enum string.
Return True or False."""
if len(s) == 0 or s[0].isspace() or s[-1].isspace():
return False
else:
return True | python | def chk_enum_arg(s):
"""Checks if the string `s` is a valid enum string.
Return True or False."""
if len(s) == 0 or s[0].isspace() or s[-1].isspace():
return False
else:
return True | [
"def",
"chk_enum_arg",
"(",
"s",
")",
":",
"if",
"len",
"(",
"s",
")",
"==",
"0",
"or",
"s",
"[",
"0",
"]",
".",
"isspace",
"(",
")",
"or",
"s",
"[",
"-",
"1",
"]",
".",
"isspace",
"(",
")",
":",
"return",
"False",
"else",
":",
"return",
"T... | Checks if the string `s` is a valid enum string.
Return True or False. | [
"Checks",
"if",
"the",
"string",
"s",
"is",
"a",
"valid",
"enum",
"string",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/syntax.py#L186-L194 | train | 226,027 |
mbj4668/pyang | pyang/syntax.py | chk_fraction_digits_arg | def chk_fraction_digits_arg(s):
"""Checks if the string `s` is a valid fraction-digits argument.
Return True or False."""
try:
v = int(s)
if v >= 1 and v <= 18:
return True
else:
return False
except ValueError:
return False | python | def chk_fraction_digits_arg(s):
"""Checks if the string `s` is a valid fraction-digits argument.
Return True or False."""
try:
v = int(s)
if v >= 1 and v <= 18:
return True
else:
return False
except ValueError:
return False | [
"def",
"chk_fraction_digits_arg",
"(",
"s",
")",
":",
"try",
":",
"v",
"=",
"int",
"(",
"s",
")",
"if",
"v",
">=",
"1",
"and",
"v",
"<=",
"18",
":",
"return",
"True",
"else",
":",
"return",
"False",
"except",
"ValueError",
":",
"return",
"False"
] | Checks if the string `s` is a valid fraction-digits argument.
Return True or False. | [
"Checks",
"if",
"the",
"string",
"s",
"is",
"a",
"valid",
"fraction",
"-",
"digits",
"argument",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/syntax.py#L196-L207 | train | 226,028 |
mbj4668/pyang | pyang/translators/dsdl.py | Patch.combine | def combine(self, patch):
"""Add `patch.plist` to `self.plist`."""
exclusive = set(["config", "default", "mandatory", "presence",
"min-elements", "max-elements"])
kws = set([s.keyword for s in self.plist]) & exclusive
add = [n for n in patch.plist if n.keyword not in kws]
self.plist.extend(add) | python | def combine(self, patch):
"""Add `patch.plist` to `self.plist`."""
exclusive = set(["config", "default", "mandatory", "presence",
"min-elements", "max-elements"])
kws = set([s.keyword for s in self.plist]) & exclusive
add = [n for n in patch.plist if n.keyword not in kws]
self.plist.extend(add) | [
"def",
"combine",
"(",
"self",
",",
"patch",
")",
":",
"exclusive",
"=",
"set",
"(",
"[",
"\"config\"",
",",
"\"default\"",
",",
"\"mandatory\"",
",",
"\"presence\"",
",",
"\"min-elements\"",
",",
"\"max-elements\"",
"]",
")",
"kws",
"=",
"set",
"(",
"[",
... | Add `patch.plist` to `self.plist`. | [
"Add",
"patch",
".",
"plist",
"to",
"self",
".",
"plist",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L136-L142 | train | 226,029 |
mbj4668/pyang | pyang/translators/dsdl.py | HybridDSDLSchema.serialize | def serialize(self):
"""Return the string representation of the receiver."""
res = '<?xml version="1.0" encoding="UTF-8"?>'
for ns in self.namespaces:
self.top_grammar.attr["xmlns:" + self.namespaces[ns]] = ns
res += self.top_grammar.start_tag()
for ch in self.top_grammar.children:
res += ch.serialize()
res += self.tree.serialize()
for d in self.global_defs:
res += self.global_defs[d].serialize()
for i in self.identities:
res += self.identities[i].serialize()
return res + self.top_grammar.end_tag() | python | def serialize(self):
"""Return the string representation of the receiver."""
res = '<?xml version="1.0" encoding="UTF-8"?>'
for ns in self.namespaces:
self.top_grammar.attr["xmlns:" + self.namespaces[ns]] = ns
res += self.top_grammar.start_tag()
for ch in self.top_grammar.children:
res += ch.serialize()
res += self.tree.serialize()
for d in self.global_defs:
res += self.global_defs[d].serialize()
for i in self.identities:
res += self.identities[i].serialize()
return res + self.top_grammar.end_tag() | [
"def",
"serialize",
"(",
"self",
")",
":",
"res",
"=",
"'<?xml version=\"1.0\" encoding=\"UTF-8\"?>'",
"for",
"ns",
"in",
"self",
".",
"namespaces",
":",
"self",
".",
"top_grammar",
".",
"attr",
"[",
"\"xmlns:\"",
"+",
"self",
".",
"namespaces",
"[",
"ns",
"... | Return the string representation of the receiver. | [
"Return",
"the",
"string",
"representation",
"of",
"the",
"receiver",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L340-L353 | train | 226,030 |
mbj4668/pyang | pyang/translators/dsdl.py | HybridDSDLSchema.setup_top | def setup_top(self):
"""Create top-level elements of the hybrid schema."""
self.top_grammar = SchemaNode("grammar")
self.top_grammar.attr = {
"xmlns": "http://relaxng.org/ns/structure/1.0",
"datatypeLibrary": "http://www.w3.org/2001/XMLSchema-datatypes"}
self.tree = SchemaNode("start") | python | def setup_top(self):
"""Create top-level elements of the hybrid schema."""
self.top_grammar = SchemaNode("grammar")
self.top_grammar.attr = {
"xmlns": "http://relaxng.org/ns/structure/1.0",
"datatypeLibrary": "http://www.w3.org/2001/XMLSchema-datatypes"}
self.tree = SchemaNode("start") | [
"def",
"setup_top",
"(",
"self",
")",
":",
"self",
".",
"top_grammar",
"=",
"SchemaNode",
"(",
"\"grammar\"",
")",
"self",
".",
"top_grammar",
".",
"attr",
"=",
"{",
"\"xmlns\"",
":",
"\"http://relaxng.org/ns/structure/1.0\"",
",",
"\"datatypeLibrary\"",
":",
"\... | Create top-level elements of the hybrid schema. | [
"Create",
"top",
"-",
"level",
"elements",
"of",
"the",
"hybrid",
"schema",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L424-L430 | train | 226,031 |
def create_roots(self, yam):
    """Build the local grammar and root containers for module `yam`.

    Sets up ``self.local_grammar`` with the module's namespace and
    name, records the module source (including the most recent
    revision, if any) as a Dublin Core element, and creates the three
    top-level containers ``self.data``, ``self.rpcs`` and
    ``self.notifications`` under a fresh <start> element.
    """
    lg = SchemaNode("grammar")
    lg.attr = {
        "ns": yam.search_one("namespace").arg,
        "nma:module": self.module.arg,
    }
    self.local_grammar = lg
    source = "YANG module '%s'" % yam.arg
    revisions = yam.search("revision")
    if revisions:
        source += " revision %s" % self.current_revision(revisions)
    self.dc_element(lg, "source", source)
    start = SchemaNode("start", lg)
    self.data = SchemaNode("nma:data", start, interleave=True)
    self.data.occur = 2
    self.rpcs = SchemaNode("nma:rpcs", start, interleave=False)
    self.notifications = SchemaNode("nma:notifications", start,
                                    interleave=False)
"def",
"create_roots",
"(",
"self",
",",
"yam",
")",
":",
"self",
".",
"local_grammar",
"=",
"SchemaNode",
"(",
"\"grammar\"",
")",
"self",
".",
"local_grammar",
".",
"attr",
"=",
"{",
"\"ns\"",
":",
"yam",
".",
"search_one",
"(",
"\"namespace\"",
")",
"... | Create the top-level structure for module `yam`. | [
"Create",
"the",
"top",
"-",
"level",
"structure",
"for",
"module",
"yam",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L432-L448 | train | 226,032 |
def yang_to_xpath(self, xpe):
    """Translate YANG XPath `xpe` into a Schematron-ready form.

    Two rewrites are performed:

    1. Unprefixed local names receive the current module prefix;
       inside a global grouping the placeholder variable ``$pref`` is
       used instead (later substituted through Schematron abstract
       patterns).
    2. ``$root`` is prepended to every absolute location path.
    """
    if self.gg_level:
        prefix = "$pref:"
    else:
        prefix = self.prefix_stack[-1] + ":"
    result = ""
    previous = None
    for token in xpath_lexer.scan(xpe):
        if (token.type == "SLASH" and
                previous not in ("DOT", "DOTDOT", "RPAREN", "RBRACKET",
                                 "name", "wildcard", "prefix_test")):
            # Leading slash of an absolute path -> anchor at $root.
            result += "$root"
        elif token.type == "name" and ":" not in token.value:
            result += prefix
        result += token.value
        if token.type != "_whitespace":
            previous = token.type
    return result
"def",
"yang_to_xpath",
"(",
"self",
",",
"xpe",
")",
":",
"if",
"self",
".",
"gg_level",
":",
"pref",
"=",
"\"$pref:\"",
"else",
":",
"pref",
"=",
"self",
".",
"prefix_stack",
"[",
"-",
"1",
"]",
"+",
"\":\"",
"toks",
"=",
"xpath_lexer",
".",
"scan"... | Transform YANG's `xpath` to a form suitable for Schematron.
1. Prefixes are added to unprefixed local names. Inside global
groupings, the prefix is represented as the variable
'$pref' which is substituted via Schematron abstract
patterns.
2. '$root' is prepended to every absolute location path. | [
"Transform",
"YANG",
"s",
"xpath",
"to",
"a",
"form",
"suitable",
"for",
"Schematron",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L450-L475 | train | 226,033 |
def register_identity(self, id_stmt):
    """Record `id_stmt` as a derivative of its base identity, if any.

    When `id_stmt` carries a 'base' substatement, it is appended to
    the list kept in ``self.identity_deps`` under the base identity;
    identities without a base are not registered.
    """
    base = id_stmt.search_one("base")
    if base:
        self.identity_deps.setdefault(base.i_identity, []).append(id_stmt)
"def",
"register_identity",
"(",
"self",
",",
"id_stmt",
")",
":",
"bst",
"=",
"id_stmt",
".",
"search_one",
"(",
"\"base\"",
")",
"if",
"bst",
":",
"bder",
"=",
"self",
".",
"identity_deps",
".",
"setdefault",
"(",
"bst",
".",
"i_identity",
",",
"[",
... | Register `id_stmt` with its base identity, if any. | [
"Register",
"id_stmt",
"with",
"its",
"base",
"identity",
"if",
"any",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L498-L504 | train | 226,034 |
def add_derived_identity(self, id_stmt):
    """Create (once) the named pattern for `id_stmt` and its derivatives.

    The pattern definition is cached in ``self.identities``; it
    enumerates the QName of `id_stmt` together with recursively built
    patterns for every identity derived from it. The corresponding
    <ref> pattern is returned.
    """
    prefix = self.add_namespace(id_stmt.main_module())
    if id_stmt not in self.identities:
        # First encounter: add the named pattern definition.
        self.identities[id_stmt] = SchemaNode.define(
            "__%s_%s" % (prefix, id_stmt.arg))
        parent = self.identities[id_stmt]
        if id_stmt in self.identity_deps:
            parent = SchemaNode.choice(parent, occur=2)
            for derived in self.identity_deps[id_stmt]:
                parent.subnode(self.add_derived_identity(derived))
        value = SchemaNode("value", parent, prefix + ":" + id_stmt.arg)
        value.attr["type"] = "QName"
    ref = SchemaNode("ref")
    ref.attr["name"] = self.identities[id_stmt].attr["name"]
    return ref
"def",
"add_derived_identity",
"(",
"self",
",",
"id_stmt",
")",
":",
"p",
"=",
"self",
".",
"add_namespace",
"(",
"id_stmt",
".",
"main_module",
"(",
")",
")",
"if",
"id_stmt",
"not",
"in",
"self",
".",
"identities",
":",
"# add named pattern def",
"self",
... | Add pattern def for `id_stmt` and all derived identities.
The corresponding "ref" pattern is returned. | [
"Add",
"pattern",
"def",
"for",
"id_stmt",
"and",
"all",
"derived",
"identities",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L506-L524 | train | 226,035 |
def preload_defs(self):
    """Install named patterns for all top-level groupings and typedefs."""
    top_defs = (self.module.search("grouping") +
                self.module.search("typedef"))
    for dstmt in top_defs:
        uname, dic = self.unique_def_name(dstmt)
        self.install_def(uname, dstmt, dic)
"def",
"preload_defs",
"(",
"self",
")",
":",
"for",
"d",
"in",
"(",
"self",
".",
"module",
".",
"search",
"(",
"\"grouping\"",
")",
"+",
"self",
".",
"module",
".",
"search",
"(",
"\"typedef\"",
")",
")",
":",
"uname",
",",
"dic",
"=",
"self",
"."... | Preload all top-level definitions. | [
"Preload",
"all",
"top",
"-",
"level",
"definitions",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L526-L531 | train | 226,036 |
def add_prefix(self, name, stmt):
    """Return `name` with the proper prefix attached.

    Inside a global grouping the name is returned untouched. An
    already-prefixed name has its prefix translated via
    ``self.module_prefixes``; an unprefixed name receives the prefix
    currently on top of ``self.prefix_stack``.

    May raise KeyError when the prefix maps to a module that is not
    among the input modules (callers rely on this — see add_patch).
    """
    if self.gg_level:
        return name
    pref, colon, local = name.partition(":")
    if not colon:
        return "%s:%s" % (self.prefix_stack[-1], pref)
    modname = stmt.i_module.i_prefixes[pref][0]
    return "%s:%s" % (self.module_prefixes[modname], local)
"def",
"add_prefix",
"(",
"self",
",",
"name",
",",
"stmt",
")",
":",
"if",
"self",
".",
"gg_level",
":",
"return",
"name",
"pref",
",",
"colon",
",",
"local",
"=",
"name",
".",
"partition",
"(",
"\":\"",
")",
"if",
"colon",
":",
"return",
"(",
"se... | Return `name` prepended with correct prefix.
If the name is already prefixed, the prefix may be translated
to the value obtained from `self.module_prefixes`. Unmodified
`name` is returned if we are inside a global grouping. | [
"Return",
"name",
"prepended",
"with",
"correct",
"prefix",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L533-L546 | train | 226,037 |
def dc_element(self, parent, name, text):
    """Prepend Dublin Core element `name` containing `text` to `parent`.

    A no-op unless the Dublin Core namespace URI has been registered
    in ``self.namespaces``.
    """
    if self.dc_uri not in self.namespaces:
        return
    dcel = SchemaNode("%s:%s" % (self.namespaces[self.dc_uri], name),
                      text=text)
    parent.children.insert(0, dcel)
"def",
"dc_element",
"(",
"self",
",",
"parent",
",",
"name",
",",
"text",
")",
":",
"if",
"self",
".",
"dc_uri",
"in",
"self",
".",
"namespaces",
":",
"dcel",
"=",
"SchemaNode",
"(",
"self",
".",
"namespaces",
"[",
"self",
".",
"dc_uri",
"]",
"+",
... | Add DC element `name` containing `text` to `parent`. | [
"Add",
"DC",
"element",
"name",
"containing",
"text",
"to",
"parent",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L557-L562 | train | 226,038 |
def get_default(self, stmt, refd):
    """Return the effective default value for `stmt`, or None.

    A (truthy) 'default' refinement from `refd` — the dictionary
    built by `process_patches` — takes precedence over a 'default'
    substatement of `stmt` itself.
    """
    if refd["default"]:
        return refd["default"]
    dst = stmt.search_one("default")
    return dst.arg if dst else None
"def",
"get_default",
"(",
"self",
",",
"stmt",
",",
"refd",
")",
":",
"if",
"refd",
"[",
"\"default\"",
"]",
":",
"return",
"refd",
"[",
"\"default\"",
"]",
"defst",
"=",
"stmt",
".",
"search_one",
"(",
"\"default\"",
")",
"if",
"defst",
":",
"return"... | Return default value for `stmt` node.
`refd` is a dictionary of applicable refinements that is
constructed in the `process_patches` method. | [
"Return",
"default",
"value",
"for",
"stmt",
"node",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L564-L575 | train | 226,039 |
def add_patch(self, pset, augref):
    """File a patch for `augref` ('augment' or 'refine') into `pset`.

    The patch is keyed by the first component of the (prefixed)
    target path; a patch whose remaining path matches an existing one
    is merged into it. Augments of modules that are not among the
    input modules are silently ignored.
    """
    try:
        comps = [self.add_prefix(c, augref)
                 for c in augref.arg.split("/") if c]
    except KeyError:
        # Target module is not among the input modules.
        return
    head = comps[0]
    patch = Patch(comps[1:], augref)
    bucket = pset.setdefault(head, [])
    for existing in bucket:
        if existing.path == patch.path:
            existing.combine(patch)
            return
    bucket.append(patch)
"def",
"add_patch",
"(",
"self",
",",
"pset",
",",
"augref",
")",
":",
"try",
":",
"path",
"=",
"[",
"self",
".",
"add_prefix",
"(",
"c",
",",
"augref",
")",
"for",
"c",
"in",
"augref",
".",
"arg",
".",
"split",
"(",
"\"/\"",
")",
"if",
"c",
"]... | Add patch corresponding to `augref` to `pset`.
`augref` must be either 'augment' or 'refine' statement. | [
"Add",
"patch",
"corresponding",
"to",
"augref",
"to",
"pset",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L598-L618 | train | 226,040 |
def apply_augments(self, auglist, p_elem, pset):
    """Apply the substatements of every augment in `auglist`.

    The augments are expanded in the context of `p_elem`; `pset`
    carries patches that may apply to their descendants. An augment
    with a 'when' substatement is wrapped in a conditional group (or
    interleave) node, and the prefix stack is temporarily switched
    when the augment originates in another module.
    """
    for aug in auglist:
        parent = aug.parent
        if aug.search_one("when") is None:
            target = p_elem
        else:
            # Conditional augment: wrap in a node mirroring the
            # parent's interleave status and occurrence.
            kw = "interleave" if p_elem.interleave else "group"
            target = SchemaNode(kw, p_elem, interleave=p_elem.interleave)
            target.occur = p_elem.occur
        if parent.keyword == "uses":
            self.handle_substmts(aug, target, pset)
            continue
        if parent.keyword == "submodule":
            modname = parent.i_including_modulename
        else:
            modname = parent.arg
        modpref = self.module_prefixes[modname]
        if self.prefix_stack[-1] == modpref:
            self.handle_substmts(aug, target, pset)
        else:
            self.prefix_stack.append(modpref)
            self.handle_substmts(aug, target, pset)
            self.prefix_stack.pop()
"def",
"apply_augments",
"(",
"self",
",",
"auglist",
",",
"p_elem",
",",
"pset",
")",
":",
"for",
"a",
"in",
"auglist",
":",
"par",
"=",
"a",
".",
"parent",
"if",
"a",
".",
"search_one",
"(",
"\"when\"",
")",
"is",
"None",
":",
"wel",
"=",
"p_elem... | Handle substatements of augments from `auglist`.
The augments are applied in the context of `p_elem`. `pset`
is a patch set containing patches that may be applicable to
descendants. | [
"Handle",
"substatements",
"of",
"augments",
"from",
"auglist",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L620-L650 | train | 226,041 |
def current_revision(self, r_stmts):
    """Return the most recent revision date among 'revision' statements.

    Each statement's argument must be an ISO date (YYYY-MM-DD); the
    latest one is returned in the same format.
    """
    latest = max([int(part) for part in rev.arg.split("-")]
                 for rev in r_stmts)
    return "%4d-%02d-%02d" % tuple(latest)
"def",
"current_revision",
"(",
"self",
",",
"r_stmts",
")",
":",
"cur",
"=",
"max",
"(",
"[",
"[",
"int",
"(",
"p",
")",
"for",
"p",
"in",
"r",
".",
"arg",
".",
"split",
"(",
"\"-\"",
")",
"]",
"for",
"r",
"in",
"r_stmts",
"]",
")",
"return",
... | Pick the most recent revision date.
`r_stmts` is a list of 'revision' statements. | [
"Pick",
"the",
"most",
"recent",
"revision",
"date",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L652-L658 | train | 226,042 |
def install_def(self, name, dstmt, def_map, interleave=False):
    """Map definition `dstmt` to a named pattern and store it.

    `dstmt` is a 'typedef' or 'grouping' statement that becomes the
    RELAX NG named pattern '<define name="`name`">', recorded in
    `def_map` (either ``self.local_defs`` or ``self.global_defs``).
    `interleave` sets the interleave status inside the definition.
    While translating a global definition, ``self.gg_level`` is
    raised so nested handlers know they are inside a global grouping.
    """
    delem = SchemaNode.define(name, interleave=interleave)
    delem.attr["name"] = name
    def_map[name] = delem
    is_global = def_map is self.global_defs
    if is_global:
        self.gg_level += 1
    self.handle_substmts(dstmt, delem)
    if is_global:
        self.gg_level -= 1
"def",
"install_def",
"(",
"self",
",",
"name",
",",
"dstmt",
",",
"def_map",
",",
"interleave",
"=",
"False",
")",
":",
"delem",
"=",
"SchemaNode",
".",
"define",
"(",
"name",
",",
"interleave",
"=",
"interleave",
")",
"delem",
".",
"attr",
"[",
"\"na... | Install definition `name` into the appropriate dictionary.
`dstmt` is the definition statement ('typedef' or 'grouping')
that is to be mapped to a RELAX NG named pattern '<define
name="`name`">'. `def_map` must be either `self.local_defs` or
`self.global_defs`. `interleave` determines the interleave
status inside the definition. | [
"Install",
"definition",
"name",
"into",
"the",
"appropriate",
"dictionary",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L666-L680 | train | 226,043 |
def rng_annotation(self, stmt, p_elem):
    """Append the YIN form of extension statement `stmt` to `p_elem`.

    The extension's namespace is registered first; the extension
    argument is then rendered either as a child element or as an
    attribute, depending on the extension's 'yin-element' setting.
    """
    ext = stmt.i_extension
    prf, extkw = stmt.raw_keyword
    modname, rev = stmt.i_module.i_prefixes[prf]
    module = statements.modulename_to_module(self.module, modname, rev)
    prefix = self.add_namespace(module)
    eel = SchemaNode("%s:%s" % (prefix, extkw), p_elem)
    argst = ext.search_one("argument")
    if argst:
        if argst.search_one("yin-element", "true"):
            # Argument is rendered as a child element.
            SchemaNode("%s:%s" % (prefix, argst.arg), eel, stmt.arg)
        else:
            # Argument is rendered as an attribute.
            eel.attr[argst.arg] = stmt.arg
    self.handle_substmts(stmt, eel)
"def",
"rng_annotation",
"(",
"self",
",",
"stmt",
",",
"p_elem",
")",
":",
"ext",
"=",
"stmt",
".",
"i_extension",
"prf",
",",
"extkw",
"=",
"stmt",
".",
"raw_keyword",
"(",
"modname",
",",
"rev",
")",
"=",
"stmt",
".",
"i_module",
".",
"i_prefixes",
... | Append YIN representation of extension statement `stmt`. | [
"Append",
"YIN",
"representation",
"of",
"extension",
"statement",
"stmt",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L682-L696 | train | 226,044 |
def propagate_occur(self, node, value):
    """Raise the occurrence status of `node` and its ancestors to `value`.

    Propagation walks up the parent chain and stops at the first node
    whose occurrence is already >= `value`, or at a named pattern
    definition ('define' node). Occurrence values are defined and
    explained in the SchemaNode class.
    """
    current = node
    while current.occur < value:
        current.occur = value
        if current.name == "define":
            return
        current = current.parent
"def",
"propagate_occur",
"(",
"self",
",",
"node",
",",
"value",
")",
":",
"while",
"node",
".",
"occur",
"<",
"value",
":",
"node",
".",
"occur",
"=",
"value",
"if",
"node",
".",
"name",
"==",
"\"define\"",
":",
"break",
"node",
"=",
"node",
".",
... | Propagate occurence `value` to `node` and its ancestors.
Occurence values are defined and explained in the SchemaNode
class. | [
"Propagate",
"occurence",
"value",
"to",
"node",
"and",
"its",
"ancestors",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L698-L708 | train | 226,045 |
def process_patches(self, pset, stmt, elem, altname=None):
    """Sort out the patches for a data node from `pset`.

    The node name is `altname` if given, otherwise ``stmt.arg``;
    `stmt` provides the YANG context and `elem` is the parent element
    in the output schema. Refinements that add documentation, 'must'
    constraints or change the config status are applied to `elem`
    immediately.

    Returns a tuple of:

    - a refinement dictionary mapping refinement keywords to the new
      parameter values (None where not refined),
    - a list of 'augment' statements to apply directly under `elem`,
    - a new patch set holding patches for substatements of `stmt`.
    """
    name = altname if altname else stmt.arg
    new_pset = {}
    augments = []
    refine_dict = dict.fromkeys(
        ("presence", "default", "mandatory", "min-elements",
         "max-elements"))
    for patch in pset.pop(self.add_prefix(name, stmt), []):
        if patch.path:
            # Patch targets a descendant: re-file it under the next
            # component of its path.
            new_pset.setdefault(patch.pop(), []).append(patch)
            continue
        for refaug in patch.plist:
            if refaug.keyword == "augment":
                augments.append(refaug)
                continue
            for sub in refaug.substmts:
                if sub.keyword == "description":
                    self.description_stmt(sub, elem, None)
                elif sub.keyword == "reference":
                    self.reference_stmt(sub, elem, None)
                elif sub.keyword == "must":
                    self.must_stmt(sub, elem, None)
                elif sub.keyword == "config":
                    self.nma_attribute(sub, elem)
                elif refine_dict.get(sub.keyword, False) is None:
                    # Only record the first refinement of each kind.
                    refine_dict[sub.keyword] = sub.arg
    return (refine_dict, augments, new_pset)
"def",
"process_patches",
"(",
"self",
",",
"pset",
",",
"stmt",
",",
"elem",
",",
"altname",
"=",
"None",
")",
":",
"if",
"altname",
":",
"name",
"=",
"altname",
"else",
":",
"name",
"=",
"stmt",
".",
"arg",
"new_pset",
"=",
"{",
"}",
"augments",
... | Process patches for data node `name` from `pset`.
`stmt` provides the context in YANG and `elem` is the parent
element in the output schema. Refinements adding documentation
and changing the config status are immediately applied.
The returned tuple consists of:
- a dictionary of refinements, in which keys are the keywords
of the refinement statements and values are the new values
of refined parameters.
- a list of 'augment' statements that are to be applied
directly under `elem`.
- a new patch set containing patches applicable to
substatements of `stmt`. | [
"Process",
"patches",
"for",
"data",
"node",
"name",
"from",
"pset",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L710-L757 | train | 226,046 |
def lookup_expand(self, stmt, names):
    """Search for schema nodes `names` under `stmt`, following groupings.

    `names` is a list of qualified schema-node names. Used groupings
    are traversed through their 'uses' statements, and every
    'uses'/'grouping' pair on the way to a found node is marked for
    expansion (``d_expand``). The names that were NOT found are
    returned (an empty list when everything was located).
    """
    if not names:
        return []
    todo = [stmt]
    while todo:
        current = todo.pop()
        for sub in current.substmts:
            if sub.keyword in self.schema_nodes:
                qn = self.qname(sub)
                if qn not in names:
                    continue
                names.remove(qn)
                # Mark the chain of uses/grouping pairs for expansion.
                par = sub.parent
                while hasattr(par, "d_ref"):  # par must be a grouping
                    par.d_ref.d_expand = True
                    par = par.d_ref.parent
                if not names:
                    return []  # everything found
            elif sub.keyword == "uses":
                grp = sub.i_grouping
                grp.d_ref = sub
                todo.append(grp)
    return names
"def",
"lookup_expand",
"(",
"self",
",",
"stmt",
",",
"names",
")",
":",
"if",
"not",
"names",
":",
"return",
"[",
"]",
"todo",
"=",
"[",
"stmt",
"]",
"while",
"todo",
":",
"pst",
"=",
"todo",
".",
"pop",
"(",
")",
"for",
"sub",
"in",
"pst",
"... | Find schema nodes under `stmt`, also in used groupings.
`names` is a list with qualified names of the schema nodes to
look up. All 'uses'/'grouping' pairs between `stmt` and found
schema nodes are marked for expansion. | [
"Find",
"schema",
"nodes",
"under",
"stmt",
"also",
"in",
"used",
"groupings",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L780-L805 | train | 226,047 |
def type_with_ranges(self, tchain, p_elem, rangekw, gen_data):
    """Handle a type carrying 'range' or 'length' restrictions.

    `tchain` is the chain of type definitions the ranges may be
    extracted from, `rangekw` is either 'range' or 'length', and
    `gen_data` is a zero-argument factory producing the output
    schema node (a RELAX NG <data> pattern). Without ranges a single
    data node is attached to `p_elem`; multiple range segments are
    wrapped in a choice.
    """
    ranges = self.get_ranges(tchain, rangekw)
    if not ranges:
        return p_elem.subnode(gen_data())
    if len(ranges) > 1:
        # Several disjoint segments -> wrap them in a choice.
        p_elem = SchemaNode.choice(p_elem)
        p_elem.occur = 2
    for rng in ranges:
        d_elem = gen_data()
        for param in self.range_params(rng, rangekw):
            d_elem.subnode(param)
        p_elem.subnode(d_elem)
"def",
"type_with_ranges",
"(",
"self",
",",
"tchain",
",",
"p_elem",
",",
"rangekw",
",",
"gen_data",
")",
":",
"ranges",
"=",
"self",
".",
"get_ranges",
"(",
"tchain",
",",
"rangekw",
")",
"if",
"not",
"ranges",
":",
"return",
"p_elem",
".",
"subnode",... | Handle types with 'range' or 'length' restrictions.
`tchain` is the chain of type definitions from which the
ranges may need to be extracted. `rangekw` is the statement
keyword determining the range type (either 'range' or
'length'). `gen_data` is a function that generates the
output schema node (a RELAX NG <data> pattern). | [
"Handle",
"types",
"with",
"range",
"or",
"length",
"restrictions",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L807-L825 | train | 226,048 |
mbj4668/pyang | pyang/translators/dsdl.py | HybridDSDLSchema.get_ranges | def get_ranges(self, tchain, kw):
"""Return list of ranges defined in `tchain`.
`kw` is the statement keyword determining the type of the
range, i.e. 'range' or 'length'. `tchain` is the chain of type
definitions from which the resulting range is obtained.
The returned value is a list of tuples containing the segments
of the resulting range.
"""
(lo, hi) = ("min", "max")
ran = None
for t in tchain:
rstmt = t.search_one(kw)
if rstmt is None: continue
parts = [ p.strip() for p in rstmt.arg.split("|") ]
ran = [ [ i.strip() for i in p.split("..") ] for p in parts ]
if ran[0][0] != 'min': lo = ran[0][0]
if ran[-1][-1] != 'max': hi = ran[-1][-1]
if ran is None: return None
if len(ran) == 1:
return [(lo, hi)]
else:
return [(lo, ran[0][-1])] + ran[1:-1] + [(ran[-1][0], hi)] | python | def get_ranges(self, tchain, kw):
"""Return list of ranges defined in `tchain`.
`kw` is the statement keyword determining the type of the
range, i.e. 'range' or 'length'. `tchain` is the chain of type
definitions from which the resulting range is obtained.
The returned value is a list of tuples containing the segments
of the resulting range.
"""
(lo, hi) = ("min", "max")
ran = None
for t in tchain:
rstmt = t.search_one(kw)
if rstmt is None: continue
parts = [ p.strip() for p in rstmt.arg.split("|") ]
ran = [ [ i.strip() for i in p.split("..") ] for p in parts ]
if ran[0][0] != 'min': lo = ran[0][0]
if ran[-1][-1] != 'max': hi = ran[-1][-1]
if ran is None: return None
if len(ran) == 1:
return [(lo, hi)]
else:
return [(lo, ran[0][-1])] + ran[1:-1] + [(ran[-1][0], hi)] | [
"def",
"get_ranges",
"(",
"self",
",",
"tchain",
",",
"kw",
")",
":",
"(",
"lo",
",",
"hi",
")",
"=",
"(",
"\"min\"",
",",
"\"max\"",
")",
"ran",
"=",
"None",
"for",
"t",
"in",
"tchain",
":",
"rstmt",
"=",
"t",
".",
"search_one",
"(",
"kw",
")"... | Return list of ranges defined in `tchain`.
`kw` is the statement keyword determining the type of the
range, i.e. 'range' or 'length'. `tchain` is the chain of type
definitions from which the resulting range is obtained.
The returned value is a list of tuples containing the segments
of the resulting range. | [
"Return",
"list",
"of",
"ranges",
"defined",
"in",
"tchain",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L827-L850 | train | 226,049 |
mbj4668/pyang | pyang/translators/dsdl.py | HybridDSDLSchema.handle_stmt | def handle_stmt(self, stmt, p_elem, pset={}):
"""
Run handler method for statement `stmt`.
`p_elem` is the parent node in the output schema. `pset` is
the current "patch set" - a dictionary with keys being QNames
of schema nodes at the current level of hierarchy for which
(or descendants thereof) any pending patches exist. The values
are instances of the Patch class.
All handler methods are defined below and must have the same
arguments as this method. They should create the output schema
fragment corresponding to `stmt`, apply all patches from
`pset` belonging to `stmt`, insert the fragment under `p_elem`
and perform all side effects as necessary.
"""
if self.debug > 0:
sys.stderr.write("Handling '%s %s'\n" %
(util.keyword_to_str(stmt.raw_keyword), stmt.arg))
try:
method = self.stmt_handler[stmt.keyword]
except KeyError:
if isinstance(stmt.keyword, tuple):
try:
method = self.ext_handler[stmt.keyword[0]][stmt.keyword[1]]
except KeyError:
method = self.rng_annotation
method(stmt, p_elem)
return
else:
raise error.EmitError(
"Unknown keyword %s - this should not happen.\n"
% stmt.keyword)
method(stmt, p_elem, pset) | python | def handle_stmt(self, stmt, p_elem, pset={}):
"""
Run handler method for statement `stmt`.
`p_elem` is the parent node in the output schema. `pset` is
the current "patch set" - a dictionary with keys being QNames
of schema nodes at the current level of hierarchy for which
(or descendants thereof) any pending patches exist. The values
are instances of the Patch class.
All handler methods are defined below and must have the same
arguments as this method. They should create the output schema
fragment corresponding to `stmt`, apply all patches from
`pset` belonging to `stmt`, insert the fragment under `p_elem`
and perform all side effects as necessary.
"""
if self.debug > 0:
sys.stderr.write("Handling '%s %s'\n" %
(util.keyword_to_str(stmt.raw_keyword), stmt.arg))
try:
method = self.stmt_handler[stmt.keyword]
except KeyError:
if isinstance(stmt.keyword, tuple):
try:
method = self.ext_handler[stmt.keyword[0]][stmt.keyword[1]]
except KeyError:
method = self.rng_annotation
method(stmt, p_elem)
return
else:
raise error.EmitError(
"Unknown keyword %s - this should not happen.\n"
% stmt.keyword)
method(stmt, p_elem, pset) | [
"def",
"handle_stmt",
"(",
"self",
",",
"stmt",
",",
"p_elem",
",",
"pset",
"=",
"{",
"}",
")",
":",
"if",
"self",
".",
"debug",
">",
"0",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Handling '%s %s'\\n\"",
"%",
"(",
"util",
".",
"keyword_to_str... | Run handler method for statement `stmt`.
`p_elem` is the parent node in the output schema. `pset` is
the current "patch set" - a dictionary with keys being QNames
of schema nodes at the current level of hierarchy for which
(or descendants thereof) any pending patches exist. The values
are instances of the Patch class.
All handler methods are defined below and must have the same
arguments as this method. They should create the output schema
fragment corresponding to `stmt`, apply all patches from
`pset` belonging to `stmt`, insert the fragment under `p_elem`
and perform all side effects as necessary. | [
"Run",
"handler",
"method",
"for",
"statement",
"stmt",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L882-L915 | train | 226,050 |
mbj4668/pyang | pyang/translators/dsdl.py | HybridDSDLSchema.handle_substmts | def handle_substmts(self, stmt, p_elem, pset={}):
"""Handle all substatements of `stmt`."""
for sub in stmt.substmts:
self.handle_stmt(sub, p_elem, pset) | python | def handle_substmts(self, stmt, p_elem, pset={}):
"""Handle all substatements of `stmt`."""
for sub in stmt.substmts:
self.handle_stmt(sub, p_elem, pset) | [
"def",
"handle_substmts",
"(",
"self",
",",
"stmt",
",",
"p_elem",
",",
"pset",
"=",
"{",
"}",
")",
":",
"for",
"sub",
"in",
"stmt",
".",
"substmts",
":",
"self",
".",
"handle_stmt",
"(",
"sub",
",",
"p_elem",
",",
"pset",
")"
] | Handle all substatements of `stmt`. | [
"Handle",
"all",
"substatements",
"of",
"stmt",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L917-L920 | train | 226,051 |
mbj4668/pyang | pyang/translators/dsdl.py | HybridDSDLSchema.nma_attribute | def nma_attribute(self, stmt, p_elem, pset=None):
"""Map `stmt` to a NETMOD-specific attribute.
The name of the attribute is the same as the 'keyword' of
`stmt`.
"""
att = "nma:" + stmt.keyword
if att not in p_elem.attr:
p_elem.attr[att] = stmt.arg | python | def nma_attribute(self, stmt, p_elem, pset=None):
"""Map `stmt` to a NETMOD-specific attribute.
The name of the attribute is the same as the 'keyword' of
`stmt`.
"""
att = "nma:" + stmt.keyword
if att not in p_elem.attr:
p_elem.attr[att] = stmt.arg | [
"def",
"nma_attribute",
"(",
"self",
",",
"stmt",
",",
"p_elem",
",",
"pset",
"=",
"None",
")",
":",
"att",
"=",
"\"nma:\"",
"+",
"stmt",
".",
"keyword",
"if",
"att",
"not",
"in",
"p_elem",
".",
"attr",
":",
"p_elem",
".",
"attr",
"[",
"att",
"]",
... | Map `stmt` to a NETMOD-specific attribute.
The name of the attribute is the same as the 'keyword' of
`stmt`. | [
"Map",
"stmt",
"to",
"a",
"NETMOD",
"-",
"specific",
"attribute",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L942-L950 | train | 226,052 |
mbj4668/pyang | pyang/translators/dsdl.py | HybridDSDLSchema.type_stmt | def type_stmt(self, stmt, p_elem, pset):
"""Handle ``type`` statement.
Built-in types are handled by one of the specific type
callback methods defined below.
"""
typedef = stmt.i_typedef
if typedef and not stmt.i_is_derived: # just ref
uname, dic = self.unique_def_name(typedef)
if uname not in dic:
self.install_def(uname, typedef, dic)
SchemaNode("ref", p_elem).set_attr("name", uname)
defst = typedef.search_one("default")
if defst:
dic[uname].default = defst.arg
occur = 1
else:
occur = dic[uname].occur
if occur > 0: self.propagate_occur(p_elem, occur)
return
chain = [stmt]
tdefault = None
while typedef:
type_ = typedef.search_one("type")
chain.insert(0, type_)
if tdefault is None:
tdef = typedef.search_one("default")
if tdef:
tdefault = tdef.arg
typedef = type_.i_typedef
if tdefault and p_elem.occur == 0:
p_elem.default = tdefault
self.propagate_occur(p_elem, 1)
self.type_handler[chain[0].arg](chain, p_elem) | python | def type_stmt(self, stmt, p_elem, pset):
"""Handle ``type`` statement.
Built-in types are handled by one of the specific type
callback methods defined below.
"""
typedef = stmt.i_typedef
if typedef and not stmt.i_is_derived: # just ref
uname, dic = self.unique_def_name(typedef)
if uname not in dic:
self.install_def(uname, typedef, dic)
SchemaNode("ref", p_elem).set_attr("name", uname)
defst = typedef.search_one("default")
if defst:
dic[uname].default = defst.arg
occur = 1
else:
occur = dic[uname].occur
if occur > 0: self.propagate_occur(p_elem, occur)
return
chain = [stmt]
tdefault = None
while typedef:
type_ = typedef.search_one("type")
chain.insert(0, type_)
if tdefault is None:
tdef = typedef.search_one("default")
if tdef:
tdefault = tdef.arg
typedef = type_.i_typedef
if tdefault and p_elem.occur == 0:
p_elem.default = tdefault
self.propagate_occur(p_elem, 1)
self.type_handler[chain[0].arg](chain, p_elem) | [
"def",
"type_stmt",
"(",
"self",
",",
"stmt",
",",
"p_elem",
",",
"pset",
")",
":",
"typedef",
"=",
"stmt",
".",
"i_typedef",
"if",
"typedef",
"and",
"not",
"stmt",
".",
"i_is_derived",
":",
"# just ref",
"uname",
",",
"dic",
"=",
"self",
".",
"unique_... | Handle ``type`` statement.
Built-in types are handled by one of the specific type
callback methods defined below. | [
"Handle",
"type",
"statement",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L1119-L1152 | train | 226,053 |
mbj4668/pyang | pyang/translators/dsdl.py | HybridDSDLSchema.choice_type | def choice_type(self, tchain, p_elem):
"""Handle ``enumeration`` and ``union`` types."""
elem = SchemaNode.choice(p_elem, occur=2)
self.handle_substmts(tchain[0], elem) | python | def choice_type(self, tchain, p_elem):
"""Handle ``enumeration`` and ``union`` types."""
elem = SchemaNode.choice(p_elem, occur=2)
self.handle_substmts(tchain[0], elem) | [
"def",
"choice_type",
"(",
"self",
",",
"tchain",
",",
"p_elem",
")",
":",
"elem",
"=",
"SchemaNode",
".",
"choice",
"(",
"p_elem",
",",
"occur",
"=",
"2",
")",
"self",
".",
"handle_substmts",
"(",
"tchain",
"[",
"0",
"]",
",",
"elem",
")"
] | Handle ``enumeration`` and ``union`` types. | [
"Handle",
"enumeration",
"and",
"union",
"types",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L1223-L1226 | train | 226,054 |
mbj4668/pyang | pyang/translators/dsdl.py | HybridDSDLSchema.mapped_type | def mapped_type(self, tchain, p_elem):
"""Handle types that are simply mapped to RELAX NG."""
SchemaNode("data", p_elem).set_attr("type",
self.datatype_map[tchain[0].arg]) | python | def mapped_type(self, tchain, p_elem):
"""Handle types that are simply mapped to RELAX NG."""
SchemaNode("data", p_elem).set_attr("type",
self.datatype_map[tchain[0].arg]) | [
"def",
"mapped_type",
"(",
"self",
",",
"tchain",
",",
"p_elem",
")",
":",
"SchemaNode",
"(",
"\"data\"",
",",
"p_elem",
")",
".",
"set_attr",
"(",
"\"type\"",
",",
"self",
".",
"datatype_map",
"[",
"tchain",
"[",
"0",
"]",
".",
"arg",
"]",
")"
] | Handle types that are simply mapped to RELAX NG. | [
"Handle",
"types",
"that",
"are",
"simply",
"mapped",
"to",
"RELAX",
"NG",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L1262-L1265 | train | 226,055 |
mbj4668/pyang | pyang/translators/dsdl.py | HybridDSDLSchema.numeric_type | def numeric_type(self, tchain, p_elem):
"""Handle numeric types."""
typ = tchain[0].arg
def gen_data():
elem = SchemaNode("data").set_attr("type", self.datatype_map[typ])
if typ == "decimal64":
fd = tchain[0].search_one("fraction-digits").arg
SchemaNode("param",elem,"19").set_attr("name","totalDigits")
SchemaNode("param",elem,fd).set_attr("name","fractionDigits")
return elem
self.type_with_ranges(tchain, p_elem, "range", gen_data) | python | def numeric_type(self, tchain, p_elem):
"""Handle numeric types."""
typ = tchain[0].arg
def gen_data():
elem = SchemaNode("data").set_attr("type", self.datatype_map[typ])
if typ == "decimal64":
fd = tchain[0].search_one("fraction-digits").arg
SchemaNode("param",elem,"19").set_attr("name","totalDigits")
SchemaNode("param",elem,fd).set_attr("name","fractionDigits")
return elem
self.type_with_ranges(tchain, p_elem, "range", gen_data) | [
"def",
"numeric_type",
"(",
"self",
",",
"tchain",
",",
"p_elem",
")",
":",
"typ",
"=",
"tchain",
"[",
"0",
"]",
".",
"arg",
"def",
"gen_data",
"(",
")",
":",
"elem",
"=",
"SchemaNode",
"(",
"\"data\"",
")",
".",
"set_attr",
"(",
"\"type\"",
",",
"... | Handle numeric types. | [
"Handle",
"numeric",
"types",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L1267-L1277 | train | 226,056 |
mbj4668/pyang | pyang/grammar.py | add_stmt | def add_stmt(stmt, arg_rules):
"""Use by plugins to add grammar for an extension statement."""
(arg, rules) = arg_rules
stmt_map[stmt] = (arg, rules) | python | def add_stmt(stmt, arg_rules):
"""Use by plugins to add grammar for an extension statement."""
(arg, rules) = arg_rules
stmt_map[stmt] = (arg, rules) | [
"def",
"add_stmt",
"(",
"stmt",
",",
"arg_rules",
")",
":",
"(",
"arg",
",",
"rules",
")",
"=",
"arg_rules",
"stmt_map",
"[",
"stmt",
"]",
"=",
"(",
"arg",
",",
"rules",
")"
] | Use by plugins to add grammar for an extension statement. | [
"Use",
"by",
"plugins",
"to",
"add",
"grammar",
"for",
"an",
"extension",
"statement",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/grammar.py#L77-L80 | train | 226,057 |
mbj4668/pyang | pyang/grammar.py | add_to_stmts_rules | def add_to_stmts_rules(stmts, rules):
"""Use by plugins to add extra rules to the existing rules for
a statement."""
def is_rule_less_than(ra, rb):
rka = ra[0]
rkb = rb[0]
if not util.is_prefixed(rkb):
# old rule is non-prefixed; append new rule after
return False
if not util.is_prefixed(rka):
# old rule prefixed, but new rule is not, insert
return True
# both are prefixed, compare modulename
return rka[0] < rkb[0]
for s in stmts:
(arg, rules0) = stmt_map[s]
for r in rules:
i = 0
while i < len(rules0):
if is_rule_less_than(r, rules0[i]):
rules0.insert(i, r)
break
i += 1
if i == len(rules0):
rules0.insert(i, r) | python | def add_to_stmts_rules(stmts, rules):
"""Use by plugins to add extra rules to the existing rules for
a statement."""
def is_rule_less_than(ra, rb):
rka = ra[0]
rkb = rb[0]
if not util.is_prefixed(rkb):
# old rule is non-prefixed; append new rule after
return False
if not util.is_prefixed(rka):
# old rule prefixed, but new rule is not, insert
return True
# both are prefixed, compare modulename
return rka[0] < rkb[0]
for s in stmts:
(arg, rules0) = stmt_map[s]
for r in rules:
i = 0
while i < len(rules0):
if is_rule_less_than(r, rules0[i]):
rules0.insert(i, r)
break
i += 1
if i == len(rules0):
rules0.insert(i, r) | [
"def",
"add_to_stmts_rules",
"(",
"stmts",
",",
"rules",
")",
":",
"def",
"is_rule_less_than",
"(",
"ra",
",",
"rb",
")",
":",
"rka",
"=",
"ra",
"[",
"0",
"]",
"rkb",
"=",
"rb",
"[",
"0",
"]",
"if",
"not",
"util",
".",
"is_prefixed",
"(",
"rkb",
... | Use by plugins to add extra rules to the existing rules for
a statement. | [
"Use",
"by",
"plugins",
"to",
"add",
"extra",
"rules",
"to",
"the",
"existing",
"rules",
"for",
"a",
"statement",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/grammar.py#L82-L106 | train | 226,058 |
mbj4668/pyang | pyang/grammar.py | chk_module_statements | def chk_module_statements(ctx, module_stmt, canonical=False):
"""Validate the statement hierarchy according to the grammar.
Return True if module is valid, False otherwise.
"""
return chk_statement(ctx, module_stmt, top_stmts, canonical) | python | def chk_module_statements(ctx, module_stmt, canonical=False):
"""Validate the statement hierarchy according to the grammar.
Return True if module is valid, False otherwise.
"""
return chk_statement(ctx, module_stmt, top_stmts, canonical) | [
"def",
"chk_module_statements",
"(",
"ctx",
",",
"module_stmt",
",",
"canonical",
"=",
"False",
")",
":",
"return",
"chk_statement",
"(",
"ctx",
",",
"module_stmt",
",",
"top_stmts",
",",
"canonical",
")"
] | Validate the statement hierarchy according to the grammar.
Return True if module is valid, False otherwise. | [
"Validate",
"the",
"statement",
"hierarchy",
"according",
"to",
"the",
"grammar",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/grammar.py#L570-L575 | train | 226,059 |
mbj4668/pyang | pyang/grammar.py | chk_statement | def chk_statement(ctx, stmt, grammar, canonical=False):
"""Validate `stmt` according to `grammar`.
Marks each statement in the hierearchy with stmt.is_grammatically_valid,
which is a boolean.
Return True if stmt is valid, False otherwise.
"""
n = len(ctx.errors)
if canonical == True:
canspec = grammar
else:
canspec = []
_chk_stmts(ctx, stmt.pos, [stmt], None, (grammar, canspec), canonical)
return n == len(ctx.errors) | python | def chk_statement(ctx, stmt, grammar, canonical=False):
"""Validate `stmt` according to `grammar`.
Marks each statement in the hierearchy with stmt.is_grammatically_valid,
which is a boolean.
Return True if stmt is valid, False otherwise.
"""
n = len(ctx.errors)
if canonical == True:
canspec = grammar
else:
canspec = []
_chk_stmts(ctx, stmt.pos, [stmt], None, (grammar, canspec), canonical)
return n == len(ctx.errors) | [
"def",
"chk_statement",
"(",
"ctx",
",",
"stmt",
",",
"grammar",
",",
"canonical",
"=",
"False",
")",
":",
"n",
"=",
"len",
"(",
"ctx",
".",
"errors",
")",
"if",
"canonical",
"==",
"True",
":",
"canspec",
"=",
"grammar",
"else",
":",
"canspec",
"=",
... | Validate `stmt` according to `grammar`.
Marks each statement in the hierearchy with stmt.is_grammatically_valid,
which is a boolean.
Return True if stmt is valid, False otherwise. | [
"Validate",
"stmt",
"according",
"to",
"grammar",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/grammar.py#L577-L591 | train | 226,060 |
mbj4668/pyang | pyang/grammar.py | sort_canonical | def sort_canonical(keyword, stmts):
"""Sort all `stmts` in the canonical order defined by `keyword`.
Return the sorted list. The `stmt` list is not modified.
If `keyword` does not have a canonical order, the list is returned
as is.
"""
try:
(_arg_type, subspec) = stmt_map[keyword]
except KeyError:
return stmts
res = []
# keep the order of data definition statements and case
keep = [s[0] for s in data_def_stmts] + ['case']
for (kw, _spec) in flatten_spec(subspec):
# keep comments before a statement together with that statement
comments = []
for s in stmts:
if s.keyword == '_comment':
comments.append(s)
elif s.keyword == kw and kw not in keep:
res.extend(comments)
comments = []
res.append(s)
else:
comments = []
# then copy all other statements (extensions)
res.extend([stmt for stmt in stmts if stmt not in res])
return res | python | def sort_canonical(keyword, stmts):
"""Sort all `stmts` in the canonical order defined by `keyword`.
Return the sorted list. The `stmt` list is not modified.
If `keyword` does not have a canonical order, the list is returned
as is.
"""
try:
(_arg_type, subspec) = stmt_map[keyword]
except KeyError:
return stmts
res = []
# keep the order of data definition statements and case
keep = [s[0] for s in data_def_stmts] + ['case']
for (kw, _spec) in flatten_spec(subspec):
# keep comments before a statement together with that statement
comments = []
for s in stmts:
if s.keyword == '_comment':
comments.append(s)
elif s.keyword == kw and kw not in keep:
res.extend(comments)
comments = []
res.append(s)
else:
comments = []
# then copy all other statements (extensions)
res.extend([stmt for stmt in stmts if stmt not in res])
return res | [
"def",
"sort_canonical",
"(",
"keyword",
",",
"stmts",
")",
":",
"try",
":",
"(",
"_arg_type",
",",
"subspec",
")",
"=",
"stmt_map",
"[",
"keyword",
"]",
"except",
"KeyError",
":",
"return",
"stmts",
"res",
"=",
"[",
"]",
"# keep the order of data definition... | Sort all `stmts` in the canonical order defined by `keyword`.
Return the sorted list. The `stmt` list is not modified.
If `keyword` does not have a canonical order, the list is returned
as is. | [
"Sort",
"all",
"stmts",
"in",
"the",
"canonical",
"order",
"defined",
"by",
"keyword",
".",
"Return",
"the",
"sorted",
"list",
".",
"The",
"stmt",
"list",
"is",
"not",
"modified",
".",
"If",
"keyword",
"does",
"not",
"have",
"a",
"canonical",
"order",
"t... | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/grammar.py#L799-L828 | train | 226,061 |
mbj4668/pyang | pyang/xpath_lexer.py | scan | def scan(s):
"""Return a list of tokens, or throw SyntaxError on failure.
"""
line = 1
linepos = 1
pos = 0
toks = []
while pos < len(s):
matched = False
for (tokname, r) in patterns:
m = r.match(s, pos)
if m is not None:
# found a matching token
v = m.group(0)
prec = _preceding_token(toks)
if tokname == 'STAR' and prec is not None and _is_special(prec):
# XPath 1.0 spec, 3.7 special rule 1a
# interpret '*' as a wildcard
tok = XPathTok('wildcard', v, line, linepos)
elif (tokname == 'name' and
prec is not None and not _is_special(prec) and
v in operators):
# XPath 1.0 spec, 3.7 special rule 1b
# interpret the name as an operator
tok = XPathTok(operators[v], v, line, linepos)
elif tokname == 'name':
# check if next token is '('
if re_open_para.match(s, pos + len(v)):
# XPath 1.0 spec, 3.7 special rule 2
if v in node_types:
# XPath 1.0 spec, 3.7 special rule 2a
tok = XPathTok('node_type', v, line, linepos)
else:
# XPath 1.0 spec, 3.7 special rule 2b
tok = XPathTok('function_name', v, line, linepos)
# check if next token is '::'
elif re_axis.match(s, pos + len(v)):
# XPath 1.0 spec, 3.7 special rule 3
if v in axes:
tok = XPathTok('axis', v, line, linepos)
else:
e = "unknown axis %s" % v
raise XPathError(e, line, linepos)
else:
tok = XPathTok('name', v, line, linepos)
else:
tok = XPathTok(tokname, v, line, linepos)
if tokname == '_whitespace':
n = v.count('\n')
if n > 0:
line = line + n
linepos = len(v) - v.rfind('\n')
else:
linepos += len(v)
else:
linepos += len(v)
pos += len(v)
toks.append(tok)
matched = True
break
if matched == False:
# no patterns matched
raise XPathError('syntax error', line, linepos)
return toks | python | def scan(s):
"""Return a list of tokens, or throw SyntaxError on failure.
"""
line = 1
linepos = 1
pos = 0
toks = []
while pos < len(s):
matched = False
for (tokname, r) in patterns:
m = r.match(s, pos)
if m is not None:
# found a matching token
v = m.group(0)
prec = _preceding_token(toks)
if tokname == 'STAR' and prec is not None and _is_special(prec):
# XPath 1.0 spec, 3.7 special rule 1a
# interpret '*' as a wildcard
tok = XPathTok('wildcard', v, line, linepos)
elif (tokname == 'name' and
prec is not None and not _is_special(prec) and
v in operators):
# XPath 1.0 spec, 3.7 special rule 1b
# interpret the name as an operator
tok = XPathTok(operators[v], v, line, linepos)
elif tokname == 'name':
# check if next token is '('
if re_open_para.match(s, pos + len(v)):
# XPath 1.0 spec, 3.7 special rule 2
if v in node_types:
# XPath 1.0 spec, 3.7 special rule 2a
tok = XPathTok('node_type', v, line, linepos)
else:
# XPath 1.0 spec, 3.7 special rule 2b
tok = XPathTok('function_name', v, line, linepos)
# check if next token is '::'
elif re_axis.match(s, pos + len(v)):
# XPath 1.0 spec, 3.7 special rule 3
if v in axes:
tok = XPathTok('axis', v, line, linepos)
else:
e = "unknown axis %s" % v
raise XPathError(e, line, linepos)
else:
tok = XPathTok('name', v, line, linepos)
else:
tok = XPathTok(tokname, v, line, linepos)
if tokname == '_whitespace':
n = v.count('\n')
if n > 0:
line = line + n
linepos = len(v) - v.rfind('\n')
else:
linepos += len(v)
else:
linepos += len(v)
pos += len(v)
toks.append(tok)
matched = True
break
if matched == False:
# no patterns matched
raise XPathError('syntax error', line, linepos)
return toks | [
"def",
"scan",
"(",
"s",
")",
":",
"line",
"=",
"1",
"linepos",
"=",
"1",
"pos",
"=",
"0",
"toks",
"=",
"[",
"]",
"while",
"pos",
"<",
"len",
"(",
"s",
")",
":",
"matched",
"=",
"False",
"for",
"(",
"tokname",
",",
"r",
")",
"in",
"patterns",... | Return a list of tokens, or throw SyntaxError on failure. | [
"Return",
"a",
"list",
"of",
"tokens",
"or",
"throw",
"SyntaxError",
"on",
"failure",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/xpath_lexer.py#L112-L175 | train | 226,062 |
mbj4668/pyang | pyang/hello.py | HelloParser.yang_modules | def yang_modules(self):
"""
Return a list of advertised YANG module names with revisions.
Avoid repeated modules.
"""
res = {}
for c in self.capabilities:
m = c.parameters.get("module")
if m is None or m in res: continue
res[m] = c.parameters.get("revision")
return res.items() | python | def yang_modules(self):
"""
Return a list of advertised YANG module names with revisions.
Avoid repeated modules.
"""
res = {}
for c in self.capabilities:
m = c.parameters.get("module")
if m is None or m in res: continue
res[m] = c.parameters.get("revision")
return res.items() | [
"def",
"yang_modules",
"(",
"self",
")",
":",
"res",
"=",
"{",
"}",
"for",
"c",
"in",
"self",
".",
"capabilities",
":",
"m",
"=",
"c",
".",
"parameters",
".",
"get",
"(",
"\"module\"",
")",
"if",
"m",
"is",
"None",
"or",
"m",
"in",
"res",
":",
... | Return a list of advertised YANG module names with revisions.
Avoid repeated modules. | [
"Return",
"a",
"list",
"of",
"advertised",
"YANG",
"module",
"names",
"with",
"revisions",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/hello.py#L75-L86 | train | 226,063 |
mbj4668/pyang | pyang/hello.py | HelloParser.get_features | def get_features(self, yam):
"""Return list of features declared for module `yam`."""
mcap = [ c for c in self.capabilities
if c.parameters.get("module", None) == yam ][0]
if not mcap.parameters.get("features"): return []
return mcap.parameters["features"].split(",") | python | def get_features(self, yam):
"""Return list of features declared for module `yam`."""
mcap = [ c for c in self.capabilities
if c.parameters.get("module", None) == yam ][0]
if not mcap.parameters.get("features"): return []
return mcap.parameters["features"].split(",") | [
"def",
"get_features",
"(",
"self",
",",
"yam",
")",
":",
"mcap",
"=",
"[",
"c",
"for",
"c",
"in",
"self",
".",
"capabilities",
"if",
"c",
".",
"parameters",
".",
"get",
"(",
"\"module\"",
",",
"None",
")",
"==",
"yam",
"]",
"[",
"0",
"]",
"if",
... | Return list of features declared for module `yam`. | [
"Return",
"list",
"of",
"features",
"declared",
"for",
"module",
"yam",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/hello.py#L88-L93 | train | 226,064 |
mbj4668/pyang | pyang/hello.py | HelloParser.registered_capabilities | def registered_capabilities(self):
"""Return dictionary of non-YANG capabilities.
Only capabilities from the `CAPABILITIES` dictionary are taken
into account.
"""
return dict ([ (CAPABILITIES[c.id],c) for c in self.capabilities
if c.id in CAPABILITIES ]) | python | def registered_capabilities(self):
"""Return dictionary of non-YANG capabilities.
Only capabilities from the `CAPABILITIES` dictionary are taken
into account.
"""
return dict ([ (CAPABILITIES[c.id],c) for c in self.capabilities
if c.id in CAPABILITIES ]) | [
"def",
"registered_capabilities",
"(",
"self",
")",
":",
"return",
"dict",
"(",
"[",
"(",
"CAPABILITIES",
"[",
"c",
".",
"id",
"]",
",",
"c",
")",
"for",
"c",
"in",
"self",
".",
"capabilities",
"if",
"c",
".",
"id",
"in",
"CAPABILITIES",
"]",
")"
] | Return dictionary of non-YANG capabilities.
Only capabilities from the `CAPABILITIES` dictionary are taken
into account. | [
"Return",
"dictionary",
"of",
"non",
"-",
"YANG",
"capabilities",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/hello.py#L95-L102 | train | 226,065 |
mbj4668/pyang | pyang/util.py | listsdelete | def listsdelete(x, xs):
"""Return a new list with x removed from xs"""
i = xs.index(x)
return xs[:i] + xs[(i+1):] | python | def listsdelete(x, xs):
"""Return a new list with x removed from xs"""
i = xs.index(x)
return xs[:i] + xs[(i+1):] | [
"def",
"listsdelete",
"(",
"x",
",",
"xs",
")",
":",
"i",
"=",
"xs",
".",
"index",
"(",
"x",
")",
"return",
"xs",
"[",
":",
"i",
"]",
"+",
"xs",
"[",
"(",
"i",
"+",
"1",
")",
":",
"]"
] | Return a new list with x removed from xs | [
"Return",
"a",
"new",
"list",
"with",
"x",
"removed",
"from",
"xs"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/util.py#L76-L79 | train | 226,066 |
mbj4668/pyang | pyang/util.py | unique_prefixes | def unique_prefixes(context):
"""Return a dictionary with unique prefixes for modules in `context`.
Keys are 'module' statements and values are prefixes,
disambiguated where necessary.
"""
res = {}
for m in context.modules.values():
if m.keyword == "submodule": continue
prf = new = m.i_prefix
suff = 0
while new in res.values():
suff += 1
new = "%s%x" % (prf, suff)
res[m] = new
return res | python | def unique_prefixes(context):
"""Return a dictionary with unique prefixes for modules in `context`.
Keys are 'module' statements and values are prefixes,
disambiguated where necessary.
"""
res = {}
for m in context.modules.values():
if m.keyword == "submodule": continue
prf = new = m.i_prefix
suff = 0
while new in res.values():
suff += 1
new = "%s%x" % (prf, suff)
res[m] = new
return res | [
"def",
"unique_prefixes",
"(",
"context",
")",
":",
"res",
"=",
"{",
"}",
"for",
"m",
"in",
"context",
".",
"modules",
".",
"values",
"(",
")",
":",
"if",
"m",
".",
"keyword",
"==",
"\"submodule\"",
":",
"continue",
"prf",
"=",
"new",
"=",
"m",
"."... | Return a dictionary with unique prefixes for modules in `context`.
Keys are 'module' statements and values are prefixes,
disambiguated where necessary. | [
"Return",
"a",
"dictionary",
"with",
"unique",
"prefixes",
"for",
"modules",
"in",
"context",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/util.py#L120-L135 | train | 226,067 |
mbj4668/pyang | setup.py | PyangDist.preprocess_files | def preprocess_files(self, prefix):
"""Change the installation prefix where necessary.
"""
if prefix is None: return
files = ("bin/yang2dsdl", "man/man1/yang2dsdl.1",
"pyang/plugins/jsonxsl.py")
regex = re.compile("^(.*)/usr/local(.*)$")
for f in files:
inf = open(f)
cnt = inf.readlines()
inf.close()
ouf = open(f,"w")
for line in cnt:
mo = regex.search(line)
if mo is None:
ouf.write(line)
else:
ouf.write(mo.group(1) + prefix + mo.group(2) +
"\n")
ouf.close() | python | def preprocess_files(self, prefix):
"""Change the installation prefix where necessary.
"""
if prefix is None: return
files = ("bin/yang2dsdl", "man/man1/yang2dsdl.1",
"pyang/plugins/jsonxsl.py")
regex = re.compile("^(.*)/usr/local(.*)$")
for f in files:
inf = open(f)
cnt = inf.readlines()
inf.close()
ouf = open(f,"w")
for line in cnt:
mo = regex.search(line)
if mo is None:
ouf.write(line)
else:
ouf.write(mo.group(1) + prefix + mo.group(2) +
"\n")
ouf.close() | [
"def",
"preprocess_files",
"(",
"self",
",",
"prefix",
")",
":",
"if",
"prefix",
"is",
"None",
":",
"return",
"files",
"=",
"(",
"\"bin/yang2dsdl\"",
",",
"\"man/man1/yang2dsdl.1\"",
",",
"\"pyang/plugins/jsonxsl.py\"",
")",
"regex",
"=",
"re",
".",
"compile",
... | Change the installation prefix where necessary. | [
"Change",
"the",
"installation",
"prefix",
"where",
"necessary",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/setup.py#L26-L45 | train | 226,068 |
mbj4668/pyang | pyang/__init__.py | Context.add_module | def add_module(self, ref, text, format=None,
expect_modulename=None, expect_revision=None,
expect_failure_error=True):
"""Parse a module text and add the module data to the context
`ref` is a string which is used to identify the source of
the text for the user. used in error messages
`text` is the raw text data
`format` is one of 'yang' or 'yin'.
Returns the parsed and validated module on success, and None on error.
"""
if format == None:
format = util.guess_format(text)
if format == 'yin':
p = yin_parser.YinParser()
else:
p = yang_parser.YangParser()
module = p.parse(self, ref, text)
if module is None:
return None
if expect_modulename is not None:
if not re.match(syntax.re_identifier, expect_modulename):
error.err_add(self.errors, module.pos,
'FILENAME_BAD_MODULE_NAME',
(ref, expect_modulename, syntax.identifier))
elif expect_modulename != module.arg:
if expect_failure_error:
error.err_add(self.errors, module.pos, 'BAD_MODULE_NAME',
(module.arg, ref, expect_modulename))
return None
else:
error.err_add(self.errors, module.pos, 'WBAD_MODULE_NAME',
(module.arg, ref, expect_modulename))
latest_rev = util.get_latest_revision(module)
if expect_revision is not None:
if not re.match(syntax.re_date, expect_revision):
error.err_add(self.errors, module.pos, 'FILENAME_BAD_REVISION',
(ref, expect_revision, 'YYYY-MM-DD'))
elif expect_revision != latest_rev:
if expect_failure_error:
error.err_add(self.errors, module.pos, 'BAD_REVISION',
(latest_rev, ref, expect_revision))
return None
else:
error.err_add(self.errors, module.pos, 'WBAD_REVISION',
(latest_rev, ref, expect_revision))
if module.arg not in self.revs:
self.revs[module.arg] = []
revs = self.revs[module.arg]
revs.append((latest_rev, None))
return self.add_parsed_module(module) | python | def add_module(self, ref, text, format=None,
expect_modulename=None, expect_revision=None,
expect_failure_error=True):
"""Parse a module text and add the module data to the context
`ref` is a string which is used to identify the source of
the text for the user. used in error messages
`text` is the raw text data
`format` is one of 'yang' or 'yin'.
Returns the parsed and validated module on success, and None on error.
"""
if format == None:
format = util.guess_format(text)
if format == 'yin':
p = yin_parser.YinParser()
else:
p = yang_parser.YangParser()
module = p.parse(self, ref, text)
if module is None:
return None
if expect_modulename is not None:
if not re.match(syntax.re_identifier, expect_modulename):
error.err_add(self.errors, module.pos,
'FILENAME_BAD_MODULE_NAME',
(ref, expect_modulename, syntax.identifier))
elif expect_modulename != module.arg:
if expect_failure_error:
error.err_add(self.errors, module.pos, 'BAD_MODULE_NAME',
(module.arg, ref, expect_modulename))
return None
else:
error.err_add(self.errors, module.pos, 'WBAD_MODULE_NAME',
(module.arg, ref, expect_modulename))
latest_rev = util.get_latest_revision(module)
if expect_revision is not None:
if not re.match(syntax.re_date, expect_revision):
error.err_add(self.errors, module.pos, 'FILENAME_BAD_REVISION',
(ref, expect_revision, 'YYYY-MM-DD'))
elif expect_revision != latest_rev:
if expect_failure_error:
error.err_add(self.errors, module.pos, 'BAD_REVISION',
(latest_rev, ref, expect_revision))
return None
else:
error.err_add(self.errors, module.pos, 'WBAD_REVISION',
(latest_rev, ref, expect_revision))
if module.arg not in self.revs:
self.revs[module.arg] = []
revs = self.revs[module.arg]
revs.append((latest_rev, None))
return self.add_parsed_module(module) | [
"def",
"add_module",
"(",
"self",
",",
"ref",
",",
"text",
",",
"format",
"=",
"None",
",",
"expect_modulename",
"=",
"None",
",",
"expect_revision",
"=",
"None",
",",
"expect_failure_error",
"=",
"True",
")",
":",
"if",
"format",
"==",
"None",
":",
"for... | Parse a module text and add the module data to the context
`ref` is a string which is used to identify the source of
the text for the user. used in error messages
`text` is the raw text data
`format` is one of 'yang' or 'yin'.
Returns the parsed and validated module on success, and None on error. | [
"Parse",
"a",
"module",
"text",
"and",
"add",
"the",
"module",
"data",
"to",
"the",
"context"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/__init__.py#L56-L113 | train | 226,069 |
mbj4668/pyang | pyang/__init__.py | Context.del_module | def del_module(self, module):
"""Remove a module from the context"""
rev = util.get_latest_revision(module)
del self.modules[(module.arg, rev)] | python | def del_module(self, module):
"""Remove a module from the context"""
rev = util.get_latest_revision(module)
del self.modules[(module.arg, rev)] | [
"def",
"del_module",
"(",
"self",
",",
"module",
")",
":",
"rev",
"=",
"util",
".",
"get_latest_revision",
"(",
"module",
")",
"del",
"self",
".",
"modules",
"[",
"(",
"module",
".",
"arg",
",",
"rev",
")",
"]"
] | Remove a module from the context | [
"Remove",
"a",
"module",
"from",
"the",
"context"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/__init__.py#L138-L141 | train | 226,070 |
mbj4668/pyang | pyang/__init__.py | Context.get_module | def get_module(self, modulename, revision=None):
"""Return the module if it exists in the context"""
if revision is None and modulename in self.revs:
(revision, _handle) = self._get_latest_rev(self.revs[modulename])
if revision is not None:
if (modulename,revision) in self.modules:
return self.modules[(modulename, revision)]
else:
return None | python | def get_module(self, modulename, revision=None):
"""Return the module if it exists in the context"""
if revision is None and modulename in self.revs:
(revision, _handle) = self._get_latest_rev(self.revs[modulename])
if revision is not None:
if (modulename,revision) in self.modules:
return self.modules[(modulename, revision)]
else:
return None | [
"def",
"get_module",
"(",
"self",
",",
"modulename",
",",
"revision",
"=",
"None",
")",
":",
"if",
"revision",
"is",
"None",
"and",
"modulename",
"in",
"self",
".",
"revs",
":",
"(",
"revision",
",",
"_handle",
")",
"=",
"self",
".",
"_get_latest_rev",
... | Return the module if it exists in the context | [
"Return",
"the",
"module",
"if",
"it",
"exists",
"in",
"the",
"context"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/__init__.py#L143-L151 | train | 226,071 |
mbj4668/pyang | pyang/plugin.py | init | def init(plugindirs=[]):
"""Initialize the plugin framework"""
# initialize the builtin plugins
from .translators import yang,yin,dsdl
yang.pyang_plugin_init()
yin.pyang_plugin_init()
dsdl.pyang_plugin_init()
# initialize installed plugins
for ep in pkg_resources.iter_entry_points(group='pyang.plugin'):
plugin_init = ep.load()
plugin_init()
# search for plugins in std directories (plugins directory first)
basedir = os.path.split(sys.modules['pyang'].__file__)[0]
plugindirs.insert(0, basedir + "/transforms")
plugindirs.insert(0, basedir + "/plugins")
# add paths from env
pluginpath = os.getenv('PYANG_PLUGINPATH')
if pluginpath is not None:
plugindirs.extend(pluginpath.split(os.pathsep))
syspath = sys.path
for plugindir in plugindirs:
sys.path = [plugindir] + syspath
try:
fnames = os.listdir(plugindir)
except OSError:
continue
modnames = []
for fname in fnames:
if (fname.startswith(".#") or
fname.startswith("__init__.py") or
fname.endswith("_flymake.py") or
fname.endswith("_flymake.pyc")):
pass
elif fname.endswith(".py"):
modname = fname[:-3]
if modname not in modnames:
modnames.append(modname)
elif fname.endswith(".pyc"):
modname = fname[:-4]
if modname not in modnames:
modnames.append(modname)
for modname in modnames:
pluginmod = __import__(modname)
try:
pluginmod.pyang_plugin_init()
except AttributeError as s:
print(pluginmod.__dict__)
raise AttributeError(pluginmod.__file__ + ': ' + str(s))
sys.path = syspath | python | def init(plugindirs=[]):
"""Initialize the plugin framework"""
# initialize the builtin plugins
from .translators import yang,yin,dsdl
yang.pyang_plugin_init()
yin.pyang_plugin_init()
dsdl.pyang_plugin_init()
# initialize installed plugins
for ep in pkg_resources.iter_entry_points(group='pyang.plugin'):
plugin_init = ep.load()
plugin_init()
# search for plugins in std directories (plugins directory first)
basedir = os.path.split(sys.modules['pyang'].__file__)[0]
plugindirs.insert(0, basedir + "/transforms")
plugindirs.insert(0, basedir + "/plugins")
# add paths from env
pluginpath = os.getenv('PYANG_PLUGINPATH')
if pluginpath is not None:
plugindirs.extend(pluginpath.split(os.pathsep))
syspath = sys.path
for plugindir in plugindirs:
sys.path = [plugindir] + syspath
try:
fnames = os.listdir(plugindir)
except OSError:
continue
modnames = []
for fname in fnames:
if (fname.startswith(".#") or
fname.startswith("__init__.py") or
fname.endswith("_flymake.py") or
fname.endswith("_flymake.pyc")):
pass
elif fname.endswith(".py"):
modname = fname[:-3]
if modname not in modnames:
modnames.append(modname)
elif fname.endswith(".pyc"):
modname = fname[:-4]
if modname not in modnames:
modnames.append(modname)
for modname in modnames:
pluginmod = __import__(modname)
try:
pluginmod.pyang_plugin_init()
except AttributeError as s:
print(pluginmod.__dict__)
raise AttributeError(pluginmod.__file__ + ': ' + str(s))
sys.path = syspath | [
"def",
"init",
"(",
"plugindirs",
"=",
"[",
"]",
")",
":",
"# initialize the builtin plugins",
"from",
".",
"translators",
"import",
"yang",
",",
"yin",
",",
"dsdl",
"yang",
".",
"pyang_plugin_init",
"(",
")",
"yin",
".",
"pyang_plugin_init",
"(",
")",
"dsdl... | Initialize the plugin framework | [
"Initialize",
"the",
"plugin",
"framework"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/plugin.py#L10-L63 | train | 226,072 |
mbj4668/pyang | pyang/yin_parser.py | YinParser.split_qname | def split_qname(qname):
"""Split `qname` into namespace URI and local name
Return namespace and local name as a tuple. This is a static
method."""
res = qname.split(YinParser.ns_sep)
if len(res) == 1: # no namespace
return None, res[0]
else:
return res | python | def split_qname(qname):
"""Split `qname` into namespace URI and local name
Return namespace and local name as a tuple. This is a static
method."""
res = qname.split(YinParser.ns_sep)
if len(res) == 1: # no namespace
return None, res[0]
else:
return res | [
"def",
"split_qname",
"(",
"qname",
")",
":",
"res",
"=",
"qname",
".",
"split",
"(",
"YinParser",
".",
"ns_sep",
")",
"if",
"len",
"(",
"res",
")",
"==",
"1",
":",
"# no namespace",
"return",
"None",
",",
"res",
"[",
"0",
"]",
"else",
":",
"return... | Split `qname` into namespace URI and local name
Return namespace and local name as a tuple. This is a static
method. | [
"Split",
"qname",
"into",
"namespace",
"URI",
"and",
"local",
"name"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/yin_parser.py#L55-L64 | train | 226,073 |
mbj4668/pyang | pyang/yin_parser.py | YinParser.check_attr | def check_attr(self, pos, attrs):
"""Check for unknown attributes."""
for at in attrs:
(ns, local_name) = self.split_qname(at)
if ns is None:
error.err_add(self.ctx.errors, pos,
'UNEXPECTED_ATTRIBUTE', local_name)
elif ns == yin_namespace:
error.err_add(self.ctx.errors, pos,
'UNEXPECTED_ATTRIBUTE', "{"+at) | python | def check_attr(self, pos, attrs):
"""Check for unknown attributes."""
for at in attrs:
(ns, local_name) = self.split_qname(at)
if ns is None:
error.err_add(self.ctx.errors, pos,
'UNEXPECTED_ATTRIBUTE', local_name)
elif ns == yin_namespace:
error.err_add(self.ctx.errors, pos,
'UNEXPECTED_ATTRIBUTE', "{"+at) | [
"def",
"check_attr",
"(",
"self",
",",
"pos",
",",
"attrs",
")",
":",
"for",
"at",
"in",
"attrs",
":",
"(",
"ns",
",",
"local_name",
")",
"=",
"self",
".",
"split_qname",
"(",
"at",
")",
"if",
"ns",
"is",
"None",
":",
"error",
".",
"err_add",
"("... | Check for unknown attributes. | [
"Check",
"for",
"unknown",
"attributes",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/yin_parser.py#L220-L230 | train | 226,074 |
mbj4668/pyang | pyang/yin_parser.py | YinParser.search_definition | def search_definition(self, module, keyword, arg):
"""Search for a defintion with `keyword` `name`
Search the module and its submodules."""
r = module.search_one(keyword, arg)
if r is not None:
return r
for i in module.search('include'):
modulename = i.arg
m = self.ctx.search_module(i.pos, modulename)
if m is not None:
r = m.search_one(keyword, arg)
if r is not None:
return r
return None | python | def search_definition(self, module, keyword, arg):
"""Search for a defintion with `keyword` `name`
Search the module and its submodules."""
r = module.search_one(keyword, arg)
if r is not None:
return r
for i in module.search('include'):
modulename = i.arg
m = self.ctx.search_module(i.pos, modulename)
if m is not None:
r = m.search_one(keyword, arg)
if r is not None:
return r
return None | [
"def",
"search_definition",
"(",
"self",
",",
"module",
",",
"keyword",
",",
"arg",
")",
":",
"r",
"=",
"module",
".",
"search_one",
"(",
"keyword",
",",
"arg",
")",
"if",
"r",
"is",
"not",
"None",
":",
"return",
"r",
"for",
"i",
"in",
"module",
".... | Search for a defintion with `keyword` `name`
Search the module and its submodules. | [
"Search",
"for",
"a",
"defintion",
"with",
"keyword",
"name",
"Search",
"the",
"module",
"and",
"its",
"submodules",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/yin_parser.py#L377-L390 | train | 226,075 |
mbj4668/pyang | pyang/translators/yang.py | emit_path_arg | def emit_path_arg(keywordstr, arg, fd, indent, max_line_len, line_len, eol):
"""Heuristically pretty print a path argument"""
quote = '"'
arg = escape_str(arg)
if not(need_new_line(max_line_len, line_len, arg)):
fd.write(" " + quote + arg + quote)
return False
## FIXME: we should split the path on '/' and '[]' into multiple lines
## and then print each line
num_chars = max_line_len - line_len
if num_chars <= 0:
# really small max_line_len; we give up
fd.write(" " + quote + arg + quote)
return False
while num_chars > 2 and arg[num_chars - 1:num_chars].isalnum():
num_chars -= 1
fd.write(" " + quote + arg[:num_chars] + quote)
arg = arg[num_chars:]
keyword_cont = ((len(keywordstr) - 1) * ' ') + '+'
while arg != '':
line_len = len(
"%s%s %s%s%s%s" % (indent, keyword_cont, quote, arg, quote, eol))
num_chars = len(arg) - (line_len - max_line_len)
while num_chars > 2 and arg[num_chars - 1:num_chars].isalnum():
num_chars -= 1
fd.write('\n' + indent + keyword_cont + " " +
quote + arg[:num_chars] + quote)
arg = arg[num_chars:] | python | def emit_path_arg(keywordstr, arg, fd, indent, max_line_len, line_len, eol):
"""Heuristically pretty print a path argument"""
quote = '"'
arg = escape_str(arg)
if not(need_new_line(max_line_len, line_len, arg)):
fd.write(" " + quote + arg + quote)
return False
## FIXME: we should split the path on '/' and '[]' into multiple lines
## and then print each line
num_chars = max_line_len - line_len
if num_chars <= 0:
# really small max_line_len; we give up
fd.write(" " + quote + arg + quote)
return False
while num_chars > 2 and arg[num_chars - 1:num_chars].isalnum():
num_chars -= 1
fd.write(" " + quote + arg[:num_chars] + quote)
arg = arg[num_chars:]
keyword_cont = ((len(keywordstr) - 1) * ' ') + '+'
while arg != '':
line_len = len(
"%s%s %s%s%s%s" % (indent, keyword_cont, quote, arg, quote, eol))
num_chars = len(arg) - (line_len - max_line_len)
while num_chars > 2 and arg[num_chars - 1:num_chars].isalnum():
num_chars -= 1
fd.write('\n' + indent + keyword_cont + " " +
quote + arg[:num_chars] + quote)
arg = arg[num_chars:] | [
"def",
"emit_path_arg",
"(",
"keywordstr",
",",
"arg",
",",
"fd",
",",
"indent",
",",
"max_line_len",
",",
"line_len",
",",
"eol",
")",
":",
"quote",
"=",
"'\"'",
"arg",
"=",
"escape_str",
"(",
"arg",
")",
"if",
"not",
"(",
"need_new_line",
"(",
"max_l... | Heuristically pretty print a path argument | [
"Heuristically",
"pretty",
"print",
"a",
"path",
"argument"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/yang.py#L323-L356 | train | 226,076 |
mbj4668/pyang | pyang/translators/yang.py | emit_arg | def emit_arg(keywordstr, stmt, fd, indent, indentstep, max_line_len, line_len):
"""Heuristically pretty print the argument string with double quotes"""
arg = escape_str(stmt.arg)
lines = arg.splitlines(True)
if len(lines) <= 1:
if len(arg) > 0 and arg[-1] == '\n':
arg = arg[:-1] + r'\n'
if (stmt.keyword in _force_newline_arg or
need_new_line(max_line_len, line_len, arg)):
fd.write('\n' + indent + indentstep + '"' + arg + '"')
return True
else:
fd.write(' "' + arg + '"')
return False
else:
need_nl = False
if stmt.keyword in _force_newline_arg:
need_nl = True
elif len(keywordstr) > 8:
# Heuristics: multi-line after a "long" keyword looks better
# than after a "short" keyword (compare 'when' and 'error-message')
need_nl = True
else:
for line in lines:
if need_new_line(max_line_len, line_len + 1, line):
need_nl = True
break
if need_nl:
fd.write('\n' + indent + indentstep)
prefix = indent + indentstep
else:
fd.write(' ')
prefix = indent + len(keywordstr) * ' ' + ' '
fd.write('"' + lines[0])
for line in lines[1:-1]:
if line[0] == '\n':
fd.write('\n')
else:
fd.write(prefix + ' ' + line)
# write last line
fd.write(prefix + ' ' + lines[-1])
if lines[-1][-1] == '\n':
# last line ends with a newline, indent the ending quote
fd.write(prefix + '"')
else:
fd.write('"')
return True | python | def emit_arg(keywordstr, stmt, fd, indent, indentstep, max_line_len, line_len):
"""Heuristically pretty print the argument string with double quotes"""
arg = escape_str(stmt.arg)
lines = arg.splitlines(True)
if len(lines) <= 1:
if len(arg) > 0 and arg[-1] == '\n':
arg = arg[:-1] + r'\n'
if (stmt.keyword in _force_newline_arg or
need_new_line(max_line_len, line_len, arg)):
fd.write('\n' + indent + indentstep + '"' + arg + '"')
return True
else:
fd.write(' "' + arg + '"')
return False
else:
need_nl = False
if stmt.keyword in _force_newline_arg:
need_nl = True
elif len(keywordstr) > 8:
# Heuristics: multi-line after a "long" keyword looks better
# than after a "short" keyword (compare 'when' and 'error-message')
need_nl = True
else:
for line in lines:
if need_new_line(max_line_len, line_len + 1, line):
need_nl = True
break
if need_nl:
fd.write('\n' + indent + indentstep)
prefix = indent + indentstep
else:
fd.write(' ')
prefix = indent + len(keywordstr) * ' ' + ' '
fd.write('"' + lines[0])
for line in lines[1:-1]:
if line[0] == '\n':
fd.write('\n')
else:
fd.write(prefix + ' ' + line)
# write last line
fd.write(prefix + ' ' + lines[-1])
if lines[-1][-1] == '\n':
# last line ends with a newline, indent the ending quote
fd.write(prefix + '"')
else:
fd.write('"')
return True | [
"def",
"emit_arg",
"(",
"keywordstr",
",",
"stmt",
",",
"fd",
",",
"indent",
",",
"indentstep",
",",
"max_line_len",
",",
"line_len",
")",
":",
"arg",
"=",
"escape_str",
"(",
"stmt",
".",
"arg",
")",
"lines",
"=",
"arg",
".",
"splitlines",
"(",
"True",... | Heuristically pretty print the argument string with double quotes | [
"Heuristically",
"pretty",
"print",
"the",
"argument",
"string",
"with",
"double",
"quotes"
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/yang.py#L358-L404 | train | 226,077 |
mbj4668/pyang | pyang/plugins/sample-xml-skeleton.py | SampleXMLSkeletonPlugin.process_children | def process_children(self, node, elem, module, path, omit=[]):
"""Proceed with all children of `node`."""
for ch in node.i_children:
if ch not in omit and (ch.i_config or self.doctype == "data"):
self.node_handler.get(ch.keyword, self.ignore)(
ch, elem, module, path) | python | def process_children(self, node, elem, module, path, omit=[]):
"""Proceed with all children of `node`."""
for ch in node.i_children:
if ch not in omit and (ch.i_config or self.doctype == "data"):
self.node_handler.get(ch.keyword, self.ignore)(
ch, elem, module, path) | [
"def",
"process_children",
"(",
"self",
",",
"node",
",",
"elem",
",",
"module",
",",
"path",
",",
"omit",
"=",
"[",
"]",
")",
":",
"for",
"ch",
"in",
"node",
".",
"i_children",
":",
"if",
"ch",
"not",
"in",
"omit",
"and",
"(",
"ch",
".",
"i_conf... | Proceed with all children of `node`. | [
"Proceed",
"with",
"all",
"children",
"of",
"node",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/plugins/sample-xml-skeleton.py#L135-L140 | train | 226,078 |
mbj4668/pyang | pyang/plugins/sample-xml-skeleton.py | SampleXMLSkeletonPlugin.container | def container(self, node, elem, module, path):
"""Create a sample container element and proceed with its children."""
nel, newm, path = self.sample_element(node, elem, module, path)
if path is None:
return
if self.annots:
pres = node.search_one("presence")
if pres is not None:
nel.append(etree.Comment(" presence: %s " % pres.arg))
self.process_children(node, nel, newm, path) | python | def container(self, node, elem, module, path):
"""Create a sample container element and proceed with its children."""
nel, newm, path = self.sample_element(node, elem, module, path)
if path is None:
return
if self.annots:
pres = node.search_one("presence")
if pres is not None:
nel.append(etree.Comment(" presence: %s " % pres.arg))
self.process_children(node, nel, newm, path) | [
"def",
"container",
"(",
"self",
",",
"node",
",",
"elem",
",",
"module",
",",
"path",
")",
":",
"nel",
",",
"newm",
",",
"path",
"=",
"self",
".",
"sample_element",
"(",
"node",
",",
"elem",
",",
"module",
",",
"path",
")",
"if",
"path",
"is",
"... | Create a sample container element and proceed with its children. | [
"Create",
"a",
"sample",
"container",
"element",
"and",
"proceed",
"with",
"its",
"children",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/plugins/sample-xml-skeleton.py#L142-L151 | train | 226,079 |
mbj4668/pyang | pyang/plugins/sample-xml-skeleton.py | SampleXMLSkeletonPlugin.leaf | def leaf(self, node, elem, module, path):
"""Create a sample leaf element."""
if node.i_default is None:
nel, newm, path = self.sample_element(node, elem, module, path)
if path is None:
return
if self.annots:
nel.append(etree.Comment(
" type: %s " % node.search_one("type").arg))
elif self.defaults:
nel, newm, path = self.sample_element(node, elem, module, path)
if path is None:
return
nel.text = str(node.i_default_str) | python | def leaf(self, node, elem, module, path):
"""Create a sample leaf element."""
if node.i_default is None:
nel, newm, path = self.sample_element(node, elem, module, path)
if path is None:
return
if self.annots:
nel.append(etree.Comment(
" type: %s " % node.search_one("type").arg))
elif self.defaults:
nel, newm, path = self.sample_element(node, elem, module, path)
if path is None:
return
nel.text = str(node.i_default_str) | [
"def",
"leaf",
"(",
"self",
",",
"node",
",",
"elem",
",",
"module",
",",
"path",
")",
":",
"if",
"node",
".",
"i_default",
"is",
"None",
":",
"nel",
",",
"newm",
",",
"path",
"=",
"self",
".",
"sample_element",
"(",
"node",
",",
"elem",
",",
"mo... | Create a sample leaf element. | [
"Create",
"a",
"sample",
"leaf",
"element",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/plugins/sample-xml-skeleton.py#L153-L166 | train | 226,080 |
mbj4668/pyang | pyang/plugins/sample-xml-skeleton.py | SampleXMLSkeletonPlugin.anyxml | def anyxml(self, node, elem, module, path):
"""Create a sample anyxml element."""
nel, newm, path = self.sample_element(node, elem, module, path)
if path is None:
return
if self.annots:
nel.append(etree.Comment(" anyxml ")) | python | def anyxml(self, node, elem, module, path):
"""Create a sample anyxml element."""
nel, newm, path = self.sample_element(node, elem, module, path)
if path is None:
return
if self.annots:
nel.append(etree.Comment(" anyxml ")) | [
"def",
"anyxml",
"(",
"self",
",",
"node",
",",
"elem",
",",
"module",
",",
"path",
")",
":",
"nel",
",",
"newm",
",",
"path",
"=",
"self",
".",
"sample_element",
"(",
"node",
",",
"elem",
",",
"module",
",",
"path",
")",
"if",
"path",
"is",
"Non... | Create a sample anyxml element. | [
"Create",
"a",
"sample",
"anyxml",
"element",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/plugins/sample-xml-skeleton.py#L168-L174 | train | 226,081 |
mbj4668/pyang | pyang/plugins/sample-xml-skeleton.py | SampleXMLSkeletonPlugin.list | def list(self, node, elem, module, path):
"""Create sample entries of a list."""
nel, newm, path = self.sample_element(node, elem, module, path)
if path is None:
return
for kn in node.i_key:
self.node_handler.get(kn.keyword, self.ignore)(
kn, nel, newm, path)
self.process_children(node, nel, newm, path, node.i_key)
minel = node.search_one("min-elements")
self.add_copies(node, elem, nel, minel)
if self.annots:
self.list_comment(node, nel, minel) | python | def list(self, node, elem, module, path):
"""Create sample entries of a list."""
nel, newm, path = self.sample_element(node, elem, module, path)
if path is None:
return
for kn in node.i_key:
self.node_handler.get(kn.keyword, self.ignore)(
kn, nel, newm, path)
self.process_children(node, nel, newm, path, node.i_key)
minel = node.search_one("min-elements")
self.add_copies(node, elem, nel, minel)
if self.annots:
self.list_comment(node, nel, minel) | [
"def",
"list",
"(",
"self",
",",
"node",
",",
"elem",
",",
"module",
",",
"path",
")",
":",
"nel",
",",
"newm",
",",
"path",
"=",
"self",
".",
"sample_element",
"(",
"node",
",",
"elem",
",",
"module",
",",
"path",
")",
"if",
"path",
"is",
"None"... | Create sample entries of a list. | [
"Create",
"sample",
"entries",
"of",
"a",
"list",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/plugins/sample-xml-skeleton.py#L176-L188 | train | 226,082 |
mbj4668/pyang | pyang/plugins/sample-xml-skeleton.py | SampleXMLSkeletonPlugin.leaf_list | def leaf_list(self, node, elem, module, path):
"""Create sample entries of a leaf-list."""
nel, newm, path = self.sample_element(node, elem, module, path)
if path is None:
return
minel = node.search_one("min-elements")
self.add_copies(node, elem, nel, minel)
self.list_comment(node, nel, minel) | python | def leaf_list(self, node, elem, module, path):
"""Create sample entries of a leaf-list."""
nel, newm, path = self.sample_element(node, elem, module, path)
if path is None:
return
minel = node.search_one("min-elements")
self.add_copies(node, elem, nel, minel)
self.list_comment(node, nel, minel) | [
"def",
"leaf_list",
"(",
"self",
",",
"node",
",",
"elem",
",",
"module",
",",
"path",
")",
":",
"nel",
",",
"newm",
",",
"path",
"=",
"self",
".",
"sample_element",
"(",
"node",
",",
"elem",
",",
"module",
",",
"path",
")",
"if",
"path",
"is",
"... | Create sample entries of a leaf-list. | [
"Create",
"sample",
"entries",
"of",
"a",
"leaf",
"-",
"list",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/plugins/sample-xml-skeleton.py#L190-L197 | train | 226,083 |
mbj4668/pyang | pyang/plugins/sample-xml-skeleton.py | SampleXMLSkeletonPlugin.sample_element | def sample_element(self, node, parent, module, path):
"""Create element under `parent`.
Declare new namespace if necessary.
"""
if path is None:
return parent, module, None
elif path == []:
# GO ON
pass
else:
if node.arg == path[0]:
path = path[1:]
else:
return parent, module, None
res = etree.SubElement(parent, node.arg)
mm = node.main_module()
if mm != module:
res.attrib["xmlns"] = self.ns_uri[mm]
module = mm
return res, module, path | python | def sample_element(self, node, parent, module, path):
"""Create element under `parent`.
Declare new namespace if necessary.
"""
if path is None:
return parent, module, None
elif path == []:
# GO ON
pass
else:
if node.arg == path[0]:
path = path[1:]
else:
return parent, module, None
res = etree.SubElement(parent, node.arg)
mm = node.main_module()
if mm != module:
res.attrib["xmlns"] = self.ns_uri[mm]
module = mm
return res, module, path | [
"def",
"sample_element",
"(",
"self",
",",
"node",
",",
"parent",
",",
"module",
",",
"path",
")",
":",
"if",
"path",
"is",
"None",
":",
"return",
"parent",
",",
"module",
",",
"None",
"elif",
"path",
"==",
"[",
"]",
":",
"# GO ON",
"pass",
"else",
... | Create element under `parent`.
Declare new namespace if necessary. | [
"Create",
"element",
"under",
"parent",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/plugins/sample-xml-skeleton.py#L199-L220 | train | 226,084 |
mbj4668/pyang | pyang/plugins/sample-xml-skeleton.py | SampleXMLSkeletonPlugin.add_copies | def add_copies(self, node, parent, elem, minel):
"""Add appropriate number of `elem` copies to `parent`."""
rep = 0 if minel is None else int(minel.arg) - 1
for i in range(rep):
parent.append(copy.deepcopy(elem)) | python | def add_copies(self, node, parent, elem, minel):
"""Add appropriate number of `elem` copies to `parent`."""
rep = 0 if minel is None else int(minel.arg) - 1
for i in range(rep):
parent.append(copy.deepcopy(elem)) | [
"def",
"add_copies",
"(",
"self",
",",
"node",
",",
"parent",
",",
"elem",
",",
"minel",
")",
":",
"rep",
"=",
"0",
"if",
"minel",
"is",
"None",
"else",
"int",
"(",
"minel",
".",
"arg",
")",
"-",
"1",
"for",
"i",
"in",
"range",
"(",
"rep",
")",... | Add appropriate number of `elem` copies to `parent`. | [
"Add",
"appropriate",
"number",
"of",
"elem",
"copies",
"to",
"parent",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/plugins/sample-xml-skeleton.py#L222-L226 | train | 226,085 |
mbj4668/pyang | pyang/plugins/sample-xml-skeleton.py | SampleXMLSkeletonPlugin.list_comment | def list_comment(self, node, elem, minel):
"""Add list annotation to `elem`."""
lo = "0" if minel is None else minel.arg
maxel = node.search_one("max-elements")
hi = "" if maxel is None else maxel.arg
elem.insert(0, etree.Comment(
" # entries: %s..%s " % (lo, hi)))
if node.keyword == 'list':
elem.insert(0, etree.Comment(
" # keys: " + ",".join([k.arg for k in node.i_key]))) | python | def list_comment(self, node, elem, minel):
"""Add list annotation to `elem`."""
lo = "0" if minel is None else minel.arg
maxel = node.search_one("max-elements")
hi = "" if maxel is None else maxel.arg
elem.insert(0, etree.Comment(
" # entries: %s..%s " % (lo, hi)))
if node.keyword == 'list':
elem.insert(0, etree.Comment(
" # keys: " + ",".join([k.arg for k in node.i_key]))) | [
"def",
"list_comment",
"(",
"self",
",",
"node",
",",
"elem",
",",
"minel",
")",
":",
"lo",
"=",
"\"0\"",
"if",
"minel",
"is",
"None",
"else",
"minel",
".",
"arg",
"maxel",
"=",
"node",
".",
"search_one",
"(",
"\"max-elements\"",
")",
"hi",
"=",
"\"\... | Add list annotation to `elem`. | [
"Add",
"list",
"annotation",
"to",
"elem",
"."
] | f2a5cc3142162e5b9ee4e18d154568d939ff63dd | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/plugins/sample-xml-skeleton.py#L228-L237 | train | 226,086 |
quantmind/pulsar | pulsar/utils/slugify.py | slugify | def slugify(value, separator='-', max_length=0, word_boundary=False,
entities=True, decimal=True, hexadecimal=True):
'''Normalizes string, removes non-alpha characters,
and converts spaces to ``separator`` character
'''
value = normalize('NFKD', to_string(value, 'utf-8', 'ignore'))
if unidecode:
value = unidecode(value)
# character entity reference
if entities:
value = CHAR_ENTITY_REXP.sub(
lambda m: chr(name2codepoint[m.group(1)]), value)
# decimal character reference
if decimal:
try:
value = DECIMAL_REXP.sub(lambda m: chr(int(m.group(1))), value)
except Exception:
pass
# hexadecimal character reference
if hexadecimal:
try:
value = HEX_REXP.sub(lambda m: chr(int(m.group(1), 16)), value)
except Exception:
pass
value = value.lower()
value = REPLACE1_REXP.sub('', value)
value = REPLACE2_REXP.sub('-', value)
# remove redundant -
value = REMOVE_REXP.sub('-', value).strip('-')
# smart truncate if requested
if max_length > 0:
value = smart_truncate(value, max_length, word_boundary, '-')
if separator != '-':
value = value.replace('-', separator)
return value | python | def slugify(value, separator='-', max_length=0, word_boundary=False,
entities=True, decimal=True, hexadecimal=True):
'''Normalizes string, removes non-alpha characters,
and converts spaces to ``separator`` character
'''
value = normalize('NFKD', to_string(value, 'utf-8', 'ignore'))
if unidecode:
value = unidecode(value)
# character entity reference
if entities:
value = CHAR_ENTITY_REXP.sub(
lambda m: chr(name2codepoint[m.group(1)]), value)
# decimal character reference
if decimal:
try:
value = DECIMAL_REXP.sub(lambda m: chr(int(m.group(1))), value)
except Exception:
pass
# hexadecimal character reference
if hexadecimal:
try:
value = HEX_REXP.sub(lambda m: chr(int(m.group(1), 16)), value)
except Exception:
pass
value = value.lower()
value = REPLACE1_REXP.sub('', value)
value = REPLACE2_REXP.sub('-', value)
# remove redundant -
value = REMOVE_REXP.sub('-', value).strip('-')
# smart truncate if requested
if max_length > 0:
value = smart_truncate(value, max_length, word_boundary, '-')
if separator != '-':
value = value.replace('-', separator)
return value | [
"def",
"slugify",
"(",
"value",
",",
"separator",
"=",
"'-'",
",",
"max_length",
"=",
"0",
",",
"word_boundary",
"=",
"False",
",",
"entities",
"=",
"True",
",",
"decimal",
"=",
"True",
",",
"hexadecimal",
"=",
"True",
")",
":",
"value",
"=",
"normaliz... | Normalizes string, removes non-alpha characters,
and converts spaces to ``separator`` character | [
"Normalizes",
"string",
"removes",
"non",
"-",
"alpha",
"characters",
"and",
"converts",
"spaces",
"to",
"separator",
"character"
] | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/slugify.py#L32-L75 | train | 226,087 |
quantmind/pulsar | pulsar/utils/slugify.py | smart_truncate | def smart_truncate(value, max_length=0, word_boundaries=False, separator=' '):
""" Truncate a string """
value = value.strip(separator)
if not max_length:
return value
if len(value) < max_length:
return value
if not word_boundaries:
return value[:max_length].strip(separator)
if separator not in value:
return value[:max_length]
truncated = ''
for word in value.split(separator):
if word:
next_len = len(truncated) + len(word) + len(separator)
if next_len <= max_length:
truncated += '{0}{1}'.format(word, separator)
if not truncated:
truncated = value[:max_length]
return truncated.strip(separator) | python | def smart_truncate(value, max_length=0, word_boundaries=False, separator=' '):
""" Truncate a string """
value = value.strip(separator)
if not max_length:
return value
if len(value) < max_length:
return value
if not word_boundaries:
return value[:max_length].strip(separator)
if separator not in value:
return value[:max_length]
truncated = ''
for word in value.split(separator):
if word:
next_len = len(truncated) + len(word) + len(separator)
if next_len <= max_length:
truncated += '{0}{1}'.format(word, separator)
if not truncated:
truncated = value[:max_length]
return truncated.strip(separator) | [
"def",
"smart_truncate",
"(",
"value",
",",
"max_length",
"=",
"0",
",",
"word_boundaries",
"=",
"False",
",",
"separator",
"=",
"' '",
")",
":",
"value",
"=",
"value",
".",
"strip",
"(",
"separator",
")",
"if",
"not",
"max_length",
":",
"return",
"value... | Truncate a string | [
"Truncate",
"a",
"string"
] | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/slugify.py#L78-L103 | train | 226,088 |
quantmind/pulsar | pulsar/utils/pylib/redisparser.py | RedisParser.get | def get(self):
'''Called by the protocol consumer'''
if self._current:
return self._resume(self._current, False)
else:
return self._get(None) | python | def get(self):
'''Called by the protocol consumer'''
if self._current:
return self._resume(self._current, False)
else:
return self._get(None) | [
"def",
"get",
"(",
"self",
")",
":",
"if",
"self",
".",
"_current",
":",
"return",
"self",
".",
"_resume",
"(",
"self",
".",
"_current",
",",
"False",
")",
"else",
":",
"return",
"self",
".",
"_get",
"(",
"None",
")"
] | Called by the protocol consumer | [
"Called",
"by",
"the",
"protocol",
"consumer"
] | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/pylib/redisparser.py#L86-L91 | train | 226,089 |
quantmind/pulsar | pulsar/utils/pylib/redisparser.py | RedisParser.pack_pipeline | def pack_pipeline(self, commands):
'''Packs pipeline commands into bytes.'''
return b''.join(
starmap(lambda *args: b''.join(self._pack_command(args)),
(a for a, _ in commands))) | python | def pack_pipeline(self, commands):
'''Packs pipeline commands into bytes.'''
return b''.join(
starmap(lambda *args: b''.join(self._pack_command(args)),
(a for a, _ in commands))) | [
"def",
"pack_pipeline",
"(",
"self",
",",
"commands",
")",
":",
"return",
"b''",
".",
"join",
"(",
"starmap",
"(",
"lambda",
"*",
"args",
":",
"b''",
".",
"join",
"(",
"self",
".",
"_pack_command",
"(",
"args",
")",
")",
",",
"(",
"a",
"for",
"a",
... | Packs pipeline commands into bytes. | [
"Packs",
"pipeline",
"commands",
"into",
"bytes",
"."
] | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/pylib/redisparser.py#L114-L118 | train | 226,090 |
quantmind/pulsar | pulsar/cmds/pypi_version.py | PyPi.pypi_release | def pypi_release(self):
"""Get the latest pypi release
"""
meta = self.distribution.metadata
pypi = ServerProxy(self.pypi_index_url)
releases = pypi.package_releases(meta.name)
if releases:
return next(iter(sorted(releases, reverse=True))) | python | def pypi_release(self):
"""Get the latest pypi release
"""
meta = self.distribution.metadata
pypi = ServerProxy(self.pypi_index_url)
releases = pypi.package_releases(meta.name)
if releases:
return next(iter(sorted(releases, reverse=True))) | [
"def",
"pypi_release",
"(",
"self",
")",
":",
"meta",
"=",
"self",
".",
"distribution",
".",
"metadata",
"pypi",
"=",
"ServerProxy",
"(",
"self",
".",
"pypi_index_url",
")",
"releases",
"=",
"pypi",
".",
"package_releases",
"(",
"meta",
".",
"name",
")",
... | Get the latest pypi release | [
"Get",
"the",
"latest",
"pypi",
"release"
] | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/cmds/pypi_version.py#L48-L55 | train | 226,091 |
quantmind/pulsar | pulsar/async/access.py | is_mainthread | def is_mainthread(thread=None):
'''Check if thread is the main thread.
If ``thread`` is not supplied check the current thread
'''
thread = thread if thread is not None else current_thread()
return isinstance(thread, threading._MainThread) | python | def is_mainthread(thread=None):
'''Check if thread is the main thread.
If ``thread`` is not supplied check the current thread
'''
thread = thread if thread is not None else current_thread()
return isinstance(thread, threading._MainThread) | [
"def",
"is_mainthread",
"(",
"thread",
"=",
"None",
")",
":",
"thread",
"=",
"thread",
"if",
"thread",
"is",
"not",
"None",
"else",
"current_thread",
"(",
")",
"return",
"isinstance",
"(",
"thread",
",",
"threading",
".",
"_MainThread",
")"
] | Check if thread is the main thread.
If ``thread`` is not supplied check the current thread | [
"Check",
"if",
"thread",
"is",
"the",
"main",
"thread",
"."
] | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/async/access.py#L113-L119 | train | 226,092 |
quantmind/pulsar | pulsar/async/access.py | process_data | def process_data(name=None):
'''Fetch the current process local data dictionary.
If ``name`` is not ``None`` it returns the value at``name``,
otherwise it return the process data dictionary
'''
ct = current_process()
if not hasattr(ct, '_pulsar_local'):
ct._pulsar_local = {}
loc = ct._pulsar_local
return loc.get(name) if name else loc | python | def process_data(name=None):
'''Fetch the current process local data dictionary.
If ``name`` is not ``None`` it returns the value at``name``,
otherwise it return the process data dictionary
'''
ct = current_process()
if not hasattr(ct, '_pulsar_local'):
ct._pulsar_local = {}
loc = ct._pulsar_local
return loc.get(name) if name else loc | [
"def",
"process_data",
"(",
"name",
"=",
"None",
")",
":",
"ct",
"=",
"current_process",
"(",
")",
"if",
"not",
"hasattr",
"(",
"ct",
",",
"'_pulsar_local'",
")",
":",
"ct",
".",
"_pulsar_local",
"=",
"{",
"}",
"loc",
"=",
"ct",
".",
"_pulsar_local",
... | Fetch the current process local data dictionary.
If ``name`` is not ``None`` it returns the value at``name``,
otherwise it return the process data dictionary | [
"Fetch",
"the",
"current",
"process",
"local",
"data",
"dictionary",
"."
] | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/async/access.py#L126-L136 | train | 226,093 |
quantmind/pulsar | pulsar/async/access.py | thread_data | def thread_data(name, value=NOTHING, ct=None):
'''Set or retrieve an attribute ``name`` from thread ``ct``.
If ``ct`` is not given used the current thread. If ``value``
is None, it will get the value otherwise it will set the value.
'''
ct = ct or current_thread()
if is_mainthread(ct):
loc = process_data()
elif not hasattr(ct, '_pulsar_local'):
ct._pulsar_local = loc = {}
else:
loc = ct._pulsar_local
if value is not NOTHING:
if name in loc:
if loc[name] is not value:
raise RuntimeError(
'%s is already available on this thread' % name)
else:
loc[name] = value
return loc.get(name) | python | def thread_data(name, value=NOTHING, ct=None):
'''Set or retrieve an attribute ``name`` from thread ``ct``.
If ``ct`` is not given used the current thread. If ``value``
is None, it will get the value otherwise it will set the value.
'''
ct = ct or current_thread()
if is_mainthread(ct):
loc = process_data()
elif not hasattr(ct, '_pulsar_local'):
ct._pulsar_local = loc = {}
else:
loc = ct._pulsar_local
if value is not NOTHING:
if name in loc:
if loc[name] is not value:
raise RuntimeError(
'%s is already available on this thread' % name)
else:
loc[name] = value
return loc.get(name) | [
"def",
"thread_data",
"(",
"name",
",",
"value",
"=",
"NOTHING",
",",
"ct",
"=",
"None",
")",
":",
"ct",
"=",
"ct",
"or",
"current_thread",
"(",
")",
"if",
"is_mainthread",
"(",
"ct",
")",
":",
"loc",
"=",
"process_data",
"(",
")",
"elif",
"not",
"... | Set or retrieve an attribute ``name`` from thread ``ct``.
If ``ct`` is not given used the current thread. If ``value``
is None, it will get the value otherwise it will set the value. | [
"Set",
"or",
"retrieve",
"an",
"attribute",
"name",
"from",
"thread",
"ct",
"."
] | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/async/access.py#L139-L159 | train | 226,094 |
quantmind/pulsar | pulsar/utils/internet.py | parse_address | def parse_address(netloc, default_port=8000):
'''Parse an internet address ``netloc`` and return a tuple with
``host`` and ``port``.'''
if isinstance(netloc, tuple):
if len(netloc) != 2:
raise ValueError('Invalid address %s' % str(netloc))
return netloc
#
netloc = native_str(netloc)
auth = None
# Check if auth is available
if '@' in netloc:
auth, netloc = netloc.split('@')
if netloc.startswith("unix:"):
host = netloc.split("unix:")[1]
return '%s@%s' % (auth, host) if auth else host
# get host
if '[' in netloc and ']' in netloc:
host = netloc.split(']')[0][1:].lower()
elif ':' in netloc:
host = netloc.split(':')[0].lower()
elif netloc == "":
host = "0.0.0.0"
else:
host = netloc.lower()
# get port
netloc = netloc.split(']')[-1]
if ":" in netloc:
port = netloc.split(':', 1)[1]
if not port.isdigit():
raise ValueError("%r is not a valid port number." % port)
port = int(port)
else:
port = default_port
return ('%s@%s' % (auth, host) if auth else host, port) | python | def parse_address(netloc, default_port=8000):
'''Parse an internet address ``netloc`` and return a tuple with
``host`` and ``port``.'''
if isinstance(netloc, tuple):
if len(netloc) != 2:
raise ValueError('Invalid address %s' % str(netloc))
return netloc
#
netloc = native_str(netloc)
auth = None
# Check if auth is available
if '@' in netloc:
auth, netloc = netloc.split('@')
if netloc.startswith("unix:"):
host = netloc.split("unix:")[1]
return '%s@%s' % (auth, host) if auth else host
# get host
if '[' in netloc and ']' in netloc:
host = netloc.split(']')[0][1:].lower()
elif ':' in netloc:
host = netloc.split(':')[0].lower()
elif netloc == "":
host = "0.0.0.0"
else:
host = netloc.lower()
# get port
netloc = netloc.split(']')[-1]
if ":" in netloc:
port = netloc.split(':', 1)[1]
if not port.isdigit():
raise ValueError("%r is not a valid port number." % port)
port = int(port)
else:
port = default_port
return ('%s@%s' % (auth, host) if auth else host, port) | [
"def",
"parse_address",
"(",
"netloc",
",",
"default_port",
"=",
"8000",
")",
":",
"if",
"isinstance",
"(",
"netloc",
",",
"tuple",
")",
":",
"if",
"len",
"(",
"netloc",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'Invalid address %s'",
"%",
"str",... | Parse an internet address ``netloc`` and return a tuple with
``host`` and ``port``. | [
"Parse",
"an",
"internet",
"address",
"netloc",
"and",
"return",
"a",
"tuple",
"with",
"host",
"and",
"port",
"."
] | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/internet.py#L17-L51 | train | 226,095 |
quantmind/pulsar | pulsar/utils/internet.py | is_socket_closed | def is_socket_closed(sock): # pragma nocover
"""Check if socket ``sock`` is closed."""
if not sock:
return True
try:
if not poll: # pragma nocover
if not select:
return False
try:
return bool(select([sock], [], [], 0.0)[0])
except socket.error:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
except Exception:
return True | python | def is_socket_closed(sock): # pragma nocover
"""Check if socket ``sock`` is closed."""
if not sock:
return True
try:
if not poll: # pragma nocover
if not select:
return False
try:
return bool(select([sock], [], [], 0.0)[0])
except socket.error:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
except Exception:
return True | [
"def",
"is_socket_closed",
"(",
"sock",
")",
":",
"# pragma nocover",
"if",
"not",
"sock",
":",
"return",
"True",
"try",
":",
"if",
"not",
"poll",
":",
"# pragma nocover",
"if",
"not",
"select",
":",
"return",
"False",
"try",
":",
"return",
"bool",
"(",... | Check if socket ``sock`` is closed. | [
"Check",
"if",
"socket",
"sock",
"is",
"closed",
"."
] | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/internet.py#L101-L121 | train | 226,096 |
quantmind/pulsar | pulsar/utils/internet.py | close_socket | def close_socket(sock):
'''Shutdown and close the socket.'''
if sock:
try:
sock.shutdown(socket.SHUT_RDWR)
except Exception:
pass
try:
sock.close()
except Exception:
pass | python | def close_socket(sock):
'''Shutdown and close the socket.'''
if sock:
try:
sock.shutdown(socket.SHUT_RDWR)
except Exception:
pass
try:
sock.close()
except Exception:
pass | [
"def",
"close_socket",
"(",
"sock",
")",
":",
"if",
"sock",
":",
"try",
":",
"sock",
".",
"shutdown",
"(",
"socket",
".",
"SHUT_RDWR",
")",
"except",
"Exception",
":",
"pass",
"try",
":",
"sock",
".",
"close",
"(",
")",
"except",
"Exception",
":",
"p... | Shutdown and close the socket. | [
"Shutdown",
"and",
"close",
"the",
"socket",
"."
] | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/internet.py#L124-L134 | train | 226,097 |
quantmind/pulsar | pulsar/apps/rpc/mixins.py | PulsarServerCommands.rpc_server_info | async def rpc_server_info(self, request):
'''Return a dictionary of information regarding the server and workers.
It invokes the :meth:`extra_server_info` for adding custom
information.
'''
info = await send('arbiter', 'info')
info = self.extra_server_info(request, info)
try:
info = await info
except TypeError:
pass
return info | python | async def rpc_server_info(self, request):
'''Return a dictionary of information regarding the server and workers.
It invokes the :meth:`extra_server_info` for adding custom
information.
'''
info = await send('arbiter', 'info')
info = self.extra_server_info(request, info)
try:
info = await info
except TypeError:
pass
return info | [
"async",
"def",
"rpc_server_info",
"(",
"self",
",",
"request",
")",
":",
"info",
"=",
"await",
"send",
"(",
"'arbiter'",
",",
"'info'",
")",
"info",
"=",
"self",
".",
"extra_server_info",
"(",
"request",
",",
"info",
")",
"try",
":",
"info",
"=",
"awa... | Return a dictionary of information regarding the server and workers.
It invokes the :meth:`extra_server_info` for adding custom
information. | [
"Return",
"a",
"dictionary",
"of",
"information",
"regarding",
"the",
"server",
"and",
"workers",
"."
] | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/rpc/mixins.py#L18-L30 | train | 226,098 |
quantmind/pulsar | pulsar/utils/pylib/events.py | Event.bind | def bind(self, callback):
"""Bind a ``callback`` to this event.
"""
handlers = self._handlers
if self._self is None:
raise RuntimeError('%s already fired, cannot add callbacks' % self)
if handlers is None:
handlers = []
self._handlers = handlers
handlers.append(callback) | python | def bind(self, callback):
"""Bind a ``callback`` to this event.
"""
handlers = self._handlers
if self._self is None:
raise RuntimeError('%s already fired, cannot add callbacks' % self)
if handlers is None:
handlers = []
self._handlers = handlers
handlers.append(callback) | [
"def",
"bind",
"(",
"self",
",",
"callback",
")",
":",
"handlers",
"=",
"self",
".",
"_handlers",
"if",
"self",
".",
"_self",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"'%s already fired, cannot add callbacks'",
"%",
"self",
")",
"if",
"handlers",
"is... | Bind a ``callback`` to this event. | [
"Bind",
"a",
"callback",
"to",
"this",
"event",
"."
] | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/pylib/events.py#L41-L50 | train | 226,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.