text stringlengths 81 112k |
|---|
Override.
Make sure that the kind returned is the root class of the
polymorphic hierarchy.
def _get_kind(cls):
    """Override.

    Make sure that the kind returned is the root class of the
    polymorphic hierarchy.
    """
    bases = cls._get_hierarchy()
    if not bases:
        # We have to jump through some hoops to call the superclass'
        # _get_kind() method. First, this is called by the metaclass
        # before the PolyModel name is defined, so it can't use
        # super(PolyModel, cls)._get_kind(). Second, we can't just call
        # Model._get_kind() because that always returns 'Model'. Hence
        # the 'im_func' hack.
        # NOTE(review): 'im_func' exists only on Python 2 methods; on
        # Python 3 this would be '__func__' -- confirm target version.
        return model.Model._get_kind.im_func(cls)
    else:
        # The root of the hierarchy names the kind for every subclass.
        return bases[0]._class_name()
Internal helper to return the list of polymorphic base classes.
This returns a list of class objects, e.g. [Animal, Feline, Cat].
def _get_hierarchy(cls):
    """Internal helper to return the list of polymorphic base classes.

    The result is ordered root-first, e.g. [Animal, Feline, Cat], and
    excludes PolyModel itself.
    """
    # Every class in the polymorphic hierarchy carries _get_hierarchy,
    # so filtering the MRO on that attribute yields exactly the chain.
    chain = [klass for klass in cls.mro() if hasattr(klass, '_get_hierarchy')]  # pragma: no branch
    del chain[-1]  # the last entry is PolyModel itself -- drop it
    chain.reverse()  # MRO is leaf-first; callers want root-first
    return chain
Returns all potential envs in a basedir
def find_env_paths_in_basedirs(base_dirs):
    """Returns all potential envs in a basedir"""
    # The trailing empty component makes the glob match directories only.
    def _dir_pattern(base):
        return os.path.join(os.path.expanduser(base), '*', '')
    return [env for base in base_dirs for env in glob.glob(_dir_pattern(base))]
Converts a list of paths to environments to env_data.
env_data is a structure {name -> (resource_dir, kernel spec)}
def convert_to_env_data(mgr, env_paths, validator_func, activate_func,
                        name_template, display_name_template, name_prefix):
    """Converts a list of paths to environments to env_data.

    env_data is a structure {name -> (resource_dir, kernel spec)}

    :param mgr: manager instance, used here for logging.
    :param env_paths: iterable of environment directories to inspect.
    :param validator_func: callable(env_dir) -> (argv, language, resource_dir);
        an empty argv means the env holds no matching kernel.
    :param activate_func: callable(mgr, env_dir) -> env dict; invoked lazily
        by the kernel spec's loader.
    :param name_template: format template producing the kernel name.
    :param display_name_template: format template producing the display name.
    :param name_prefix: prefix put before the env name (e.g. "r_").
    """
    env_data = {}
    for venv_dir in env_paths:
        # Last path component of the env dir is its name.
        venv_name = os.path.split(os.path.abspath(venv_dir))[1]
        kernel_name = name_template.format(name_prefix + venv_name)
        # Lowercase so the duplicate check below is case-insensitive.
        kernel_name = kernel_name.lower()
        if kernel_name in env_data:
            mgr.log.debug(
                "Found duplicate env kernel: %s, which would again point to %s. Using the first!",
                kernel_name, venv_dir)
            continue
        argv, language, resource_dir = validator_func(venv_dir)
        if not argv:
            # probably does not contain the kernel type (e.g. not R or python or does not contain
            # the kernel code itself)
            continue
        display_name = display_name_template.format(kernel_name)
        kspec_dict = {"argv": argv, "language": language,
                      "display_name": display_name,
                      "resource_dir": resource_dir
                      }

        # The default arguments are needed to bind the *current* loop values
        # into the closure; otherwise every loader would see the last env.
        def loader(env_dir=venv_dir, activate_func=activate_func, mgr=mgr):
            mgr.log.debug("Loading env data for %s" % env_dir)
            res = activate_func(mgr, env_dir)
            # mgr.log.info("PATH: %s" % res['PATH'])
            return res

        kspec = EnvironmentLoadingKernelSpec(loader, **kspec_dict)
        env_data.update({kernel_name: (resource_dir, kspec)})
    return env_data
Validates that this env contains an IPython kernel and returns info to start it
Returns: tuple
(ARGV, language, resource_dir)
def validate_IPykernel(venv_dir):
    """Validates that this env contains an IPython kernel and returns info to start it

    Returns: tuple
        (ARGV, language, resource_dir) -- ARGV is empty when validation fails.
    """
    # Try the common python executable names in turn.
    python_exe_name = find_exe(venv_dir, "python")
    if python_exe_name is None:
        python_exe_name = find_exe(venv_dir, "python2")
    if python_exe_name is None:
        python_exe_name = find_exe(venv_dir, "python3")
    if python_exe_name is None:
        return [], None, None

    # Make some checks for ipython first, because calling the import is expensive
    if find_exe(venv_dir, "ipython") is None:
        if find_exe(venv_dir, "ipython2") is None:
            if find_exe(venv_dir, "ipython3") is None:
                return [], None, None

    # check if this is really an ipython **kernel**
    import subprocess
    try:
        # BUGFIX: the code must not carry extra quotes -- '"import ipykernel"'
        # is a bare string literal (a no-op), so the check always succeeded.
        subprocess.check_call([python_exe_name, '-c', 'import ipykernel'])
    except (subprocess.CalledProcessError, OSError):
        # not installed (or broken interpreter)? -> not useable in any case...
        return [], None, None

    argv = [python_exe_name, "-m", "ipykernel", "-f", "{connection_file}"]
    resources_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logos", "python")
    return argv, "python", resources_dir
Validates that this env contains an IRkernel kernel and returns info to start it
Returns: tuple
(ARGV, language, resource_dir)
def validate_IRkernel(venv_dir):
    """Validates that this env contains an IRkernel kernel and returns info to start it

    Returns: tuple
        (ARGV, language, resource_dir) -- ARGV is empty when validation fails.
    """
    r_exe_name = find_exe(venv_dir, "R")
    if r_exe_name is None:
        return [], None, None

    # check if this is really an IRkernel **kernel**
    import subprocess
    try:
        # Ask R where the IRkernel kernelspec lives; empty/invalid output
        # means the package is not installed.
        print_resources = 'cat(as.character(system.file("kernelspec", package = "IRkernel")))'
        resources_dir_bytes = subprocess.check_output([r_exe_name, '--slave', '-e', print_resources])
        # Strip once here so both the existence check and the returned
        # value use the same clean path (the original stripped only for
        # the check and could return a whitespace-padded path).
        resources_dir = resources_dir_bytes.decode(errors='ignore').strip()
    except (subprocess.CalledProcessError, OSError):
        # not installed? -> not useable in any case...
        return [], None, None

    argv = [r_exe_name, "--slave", "-e", "IRkernel::main()", "--args", "{connection_file}"]
    if not os.path.exists(resources_dir):
        # Fallback to our own logos, but don't get the nice js goodies...
        resources_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logos", "r")
    return argv, "r", resources_dir
Finds an exe with that name in the environment path
def find_exe(env_dir, name):
    """Finds an exe with that name in the environment path.

    Checks the env root, then bin/ (posix), then Scripts/ (windows), and
    returns the first existing path, or None if nothing was found.
    """
    if platform.system() == "Windows":
        name += ".exe"
    for parts in ((name,), ("bin", name), ("Scripts", name)):
        candidate = os.path.join(env_dir, *parts)
        if os.path.exists(candidate):
            return candidate
    return None
Finds kernel specs from virtualenv environments
env_data is a structure {name -> (resourcedir, kernel spec)}
def get_virtualenv_env_data(mgr):
    """Finds kernel specs from virtualenv environments.

    env_data is a structure {name -> (resourcedir, kernel spec)}
    """
    if not mgr.find_virtualenv_envs:
        return {}
    mgr.log.debug("Looking for virtualenv environments in %s...", mgr.virtualenv_env_dirs)
    candidates = find_env_paths_in_basedirs(mgr.virtualenv_env_dirs)
    mgr.log.debug("Scanning virtualenv environments for python kernels...")
    return convert_to_env_data(
        mgr=mgr,
        env_paths=candidates,
        validator_func=validate_IPykernel,
        activate_func=_get_env_vars_for_virtualenv_env,
        name_template=mgr.virtualenv_prefix_template,
        display_name_template=mgr.display_name_template,
        # virtualenv has only python, so no need for a prefix
        name_prefix="")
Simply bash-specific wrapper around source-foreign
Returns a dict to be used as a new environment
def source_bash(args, stdin=None):
    """Simply bash-specific wrapper around source-foreign.

    Returns a dict to be used as a new environment.
    """
    # Prepend the shell name and its sourcer option, then delegate.
    return source_foreign(['bash', '--sourcer=source'] + list(args), stdin=stdin)
Simply zsh-specific wrapper around source-foreign
Returns a dict to be used as a new environment
def source_zsh(args, stdin=None):
    """Simply zsh-specific wrapper around source-foreign.

    Returns a dict to be used as a new environment.
    """
    # Prepend the shell name and its sourcer option, then delegate.
    return source_foreign(['zsh', '--sourcer=source'] + list(args), stdin=stdin)
Simple cmd.exe-specific wrapper around source-foreign.
returns a dict to be used as a new environment
def source_cmd(args, stdin=None):
    """Simple cmd.exe-specific wrapper around source-foreign.

    returns a dict to be used as a new environment
    """
    args = list(args)
    # Resolve the script through the binary locator when possible.
    fpath = locate_binary(args[0])
    if fpath:
        args[0] = fpath
    if not os.path.isfile(args[0]):
        raise RuntimeError("Command not found: %s" % args[0])
    # Build a quoted 'call ...' command line and escape it for cmd.exe.
    quoted = ' '.join(argvquote(arg, force=True) for arg in args)
    prevcmd = escape_windows_cmd_string('call ' + quoted)
    cmd_args = ['cmd'] + args + [
        '--prevcmd={}'.format(prevcmd),
        '--interactive=0',
        '--sourcer=call',
        '--envcmd=set',
        '--seterrpostcmd=if errorlevel 1 exit 1',
        '--use-tmpfile=1',
    ]
    return source_foreign(cmd_args, stdin=stdin)
Returns an argument quoted in such a way that that CommandLineToArgvW
on Windows will return the argument string unchanged.
This is the same thing Popen does when supplied with an list of arguments.
Arguments in a command line should be separated by spaces; this
function does not add these spaces. This implementation follows the
suggestions outlined here:
https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/
def argvquote(arg, force=False):
    """Returns an argument quoted in such a way that CommandLineToArgvW
    on Windows will return the argument string unchanged.

    This is the same thing Popen does when supplied with a list of arguments.
    Arguments in a command line should be separated by spaces; this
    function does not add these spaces. This implementation follows the
    suggestions outlined here:
    https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/

    :param arg: the argument string to quote.
    :param force: quote even when no whitespace/quote character requires it.
    """
    if not force and len(arg) != 0 and not any([c in arg for c in ' \t\n\v"']):
        return arg
    else:
        n_backslashes = 0
        cmdline = '"'
        for c in arg:
            if c == "\\":
                # first count the number of current backslashes
                n_backslashes += 1
                continue
            if c == '"':
                # Escape all backslashes and the following double quotation mark
                cmdline += (n_backslashes * 2 + 1) * '\\'
            else:
                # backslashes are not special here
                cmdline += n_backslashes * '\\'
            # BUGFIX: the backslash count must be reset after *every*
            # non-backslash character, not only in the else-branch;
            # otherwise a run of backslashes before an embedded '"' was
            # also emitted again for the following character.
            n_backslashes = 0
            cmdline += c
        # Escape all trailing backslashes, but let the terminating
        # double quotation mark we add below be interpreted
        # as a metacharacter
        cmdline += n_backslashes * 2 * '\\' + '"'
        return cmdline
Returns a string that is usable by the Windows cmd.exe.
The escaping is based on details here and empirical testing:
http://www.robvanderwoude.com/escapechars.php
def escape_windows_cmd_string(s):
    """Returns a string that is usable by the Windows cmd.exe.

    The escaping is based on details here and empirical testing:
    http://www.robvanderwoude.com/escapechars.php
    """
    # BUGFIX: '^' must be escaped *first*; otherwise the carets inserted
    # while escaping the earlier metacharacters ('(', ')', '%', '!')
    # would themselves get doubled by the later '^' pass.
    for c in '^()%!<>&|"':
        s = s.replace(c, '^' + c)
    s = s.replace('/?', '/.')
    return s
Sources a file written in a foreign shell language.
def source_foreign(args, stdin=None):
    """Sources a file written in a foreign shell language.

    ``args`` is an argv-style list understood by the parser from
    _ensure_source_foreign_parser() (shell name, files-or-code, plus
    options such as --sourcer/--prevcmd).  Returns a dict to be used as
    a new environment.  Raises RuntimeError when the foreign shell
    produced no environment data.
    """
    parser = _ensure_source_foreign_parser()
    ns = parser.parse_args(args)
    if ns.prevcmd is not None:
        pass  # don't change prevcmd if given explicitly
    elif os.path.isfile(ns.files_or_code[0]):
        # we have filename(s) to source
        ns.prevcmd = '{} "{}"'.format(ns.sourcer, '" "'.join(ns.files_or_code))
    elif ns.prevcmd is None:
        ns.prevcmd = ' '.join(ns.files_or_code)  # code to run, no files
    fsenv = foreign_shell_data(shell=ns.shell, login=ns.login,
                               interactive=ns.interactive,
                               envcmd=ns.envcmd,
                               aliascmd=ns.aliascmd,
                               extra_args=ns.extra_args,
                               safe=ns.safe, prevcmd=ns.prevcmd,
                               postcmd=ns.postcmd,
                               funcscmd=ns.funcscmd,
                               sourcer=ns.sourcer,
                               use_tmpfile=ns.use_tmpfile,
                               seterrprevcmd=ns.seterrprevcmd,
                               seterrpostcmd=ns.seterrpostcmd)
    # foreign_shell_data signals a (safe) failure with None.
    if fsenv is None:
        raise RuntimeError("Source failed: {}\n".format(ns.prevcmd), 1)
    # apply results: start from the current environment and overlay the
    # values reported by the foreign shell.
    env = os.environ.copy()
    for k, v in fsenv.items():
        if k in env and v == env[k]:
            continue  # no change from original
        env[k] = v
    # Remove any env-vars that were unset by the script.
    for k in os.environ:  # use os.environ again to prevent errors about changed size
        if k not in fsenv:
            env.pop(k, None)
    return env
Checks that path is an executable regular file, or a symlink towards one.
This is roughly ``os.path isfile(path) and os.access(path, os.X_OK)``.
This function was forked from pexpect originally:
Copyright (c) 2013-2014, Pexpect development team
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
def _is_executable_file(path):
    """Checks that path is an executable regular file, or a symlink towards one.
    This is roughly ``os.path.isfile(path) and os.access(path, os.X_OK)``.
    This function was forked from pexpect originally:
    Copyright (c) 2013-2014, Pexpect development team
    Copyright (c) 2012, Noah Spurrier <noah@noah.org>
    PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
    PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
    COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    """
    # Resolve symlinks first so a link to an executable counts.
    real = os.path.realpath(path)
    # Directories, fifos etc. are rejected by isfile; then check the X bit.
    return os.path.isfile(real) and os.access(real, os.X_OK)
Extracts data from a foreign (non-xonsh) shells. Currently this gets
the environment, aliases, and functions but may be extended in the future.
Parameters
----------
shell : str
The name of the shell, such as 'bash' or '/bin/sh'.
interactive : bool, optional
Whether the shell should be run in interactive mode.
login : bool, optional
Whether the shell should be a login shell.
envcmd : str or None, optional
The command to generate environment output with.
aliascmd : str or None, optional
The command to generate alias output with.
extra_args : tuple of str, optional
Addtional command line options to pass into the shell.
currenv : tuple of items or None, optional
Manual override for the current environment.
safe : bool, optional
Flag for whether or not to safely handle exceptions and other errors.
prevcmd : str, optional
A command to run in the shell before anything else, useful for
sourcing and other commands that may require environment recovery.
postcmd : str, optional
A command to run after everything else, useful for cleaning up any
damage that the prevcmd may have caused.
funcscmd : str or None, optional
This is a command or script that can be used to determine the names
and locations of any functions that are native to the foreign shell.
This command should print *only* a JSON object that maps
function names to the filenames where the functions are defined.
If this is None, then a default script will attempted to be looked
up based on the shell name. Callable wrappers for these functions
will be returned in the aliases dictionary.
sourcer : str or None, optional
How to source a foreign shell file for purposes of calling functions
in that shell. If this is None, a default value will attempt to be
looked up based on the shell name.
use_tmpfile : bool, optional
This specifies if the commands are written to a tmp file or just
parsed directly to the shell
tmpfile_ext : str or None, optional
If tmpfile is True this sets specifies the extension used.
runcmd : str or None, optional
Command line switches to use when running the script, such as
-c for Bash and /C for cmd.exe.
seterrprevcmd : str or None, optional
Command that enables exit-on-error for the shell that is run at the
start of the script. For example, this is "set -e" in Bash. To disable
exit-on-error behavior, simply pass in an empty string.
seterrpostcmd : str or None, optional
Command that enables exit-on-error for the shell that is run at the end
of the script. For example, this is "if errorlevel 1 exit 1" in
cmd.exe. To disable exit-on-error behavior, simply pass in an
empty string.
Returns
-------
env : dict
Dictionary of shell's environment
aliases : dict
Dictionary of shell's alaiases, this includes foreign function
wrappers.
def foreign_shell_data(shell, interactive=True, login=False, envcmd=None,
                       aliascmd=None, extra_args=(), currenv=None,
                       safe=False, prevcmd='', postcmd='', funcscmd=None,
                       sourcer=None, use_tmpfile=False, tmpfile_ext=None,
                       runcmd=None, seterrprevcmd=None, seterrpostcmd=None):
    """Extracts data from a foreign (non-xonsh) shell. Currently this gets
    the environment but may be extended in the future.

    Parameters
    ----------
    shell : str
        The name of the shell, such as 'bash' or '/bin/sh'.
    interactive : bool, optional
        Whether the shell should be run in interactive mode.
    login : bool, optional
        Whether the shell should be a login shell.
    envcmd : str or None, optional
        The command to generate environment output with.
    aliascmd : str or None, optional
        The command to generate alias output with (kept for interface
        compatibility; not used in this implementation).
    extra_args : tuple of str, optional
        Additional command line options to pass into the shell.
    currenv : mapping or None, optional
        Manual override for the current environment; defaults to os.environ.
    safe : bool, optional
        Whether to safely handle exceptions and other errors (return None
        instead of raising).
    prevcmd : str, optional
        A command to run in the shell before anything else; useful for
        sourcing and other commands that may require environment recovery.
    postcmd : str, optional
        A command to run after everything else.
    funcscmd : str or None, optional
        Kept for interface compatibility; not used in this implementation.
    sourcer : str or None, optional
        Kept for interface compatibility; not used in this implementation.
    use_tmpfile : bool, optional
        Whether the command is written to a tmp file or passed directly.
    tmpfile_ext : str or None, optional
        Extension used for the tmp file, if any.
    runcmd : str or None, optional
        Command line switch for running the script (e.g. -c for Bash,
        /C for cmd.exe).
    seterrprevcmd : str or None, optional
        Exit-on-error command run at the start of the script
        (e.g. "set -e" in Bash); '' disables it.
    seterrpostcmd : str or None, optional
        Exit-on-error command run at the end of the script
        (e.g. "if errorlevel 1 exit 1" in cmd.exe); '' disables it.

    Returns
    -------
    env : dict or None
        Dictionary of the shell's environment, or None when ``safe`` is
        set and the subprocess failed.
    """
    cmd = [shell]
    cmd.extend(extra_args)  # needs to come here for GNU long options
    if interactive:
        cmd.append('-i')
    if login:
        cmd.append('-l')
    shkey = CANON_SHELL_NAMES[shell]
    envcmd = DEFAULT_ENVCMDS.get(shkey, 'env') if envcmd is None else envcmd
    tmpfile_ext = DEFAULT_TMPFILE_EXT.get(shkey, 'sh') if tmpfile_ext is None else tmpfile_ext
    runcmd = DEFAULT_RUNCMD.get(shkey, '-c') if runcmd is None else runcmd
    seterrprevcmd = DEFAULT_SETERRPREVCMD.get(shkey, '') \
        if seterrprevcmd is None else seterrprevcmd
    seterrpostcmd = DEFAULT_SETERRPOSTCMD.get(shkey, '') \
        if seterrpostcmd is None else seterrpostcmd
    command = COMMAND.format(envcmd=envcmd, prevcmd=prevcmd,
                             postcmd=postcmd,
                             seterrprevcmd=seterrprevcmd,
                             seterrpostcmd=seterrpostcmd).strip()
    cmd.append(runcmd)
    if not use_tmpfile:
        cmd.append(command)
    else:
        tmpfile = NamedTemporaryFile(suffix=tmpfile_ext, delete=False)
        tmpfile.write(command.encode('utf8'))
        tmpfile.close()
        cmd.append(tmpfile.name)
    # BUGFIX: fall back to os.environ only when no manual override was
    # given -- the original condition was inverted and clobbered the
    # caller-supplied currenv.
    if currenv is None:
        currenv = os.environ
    try:
        s = subprocess.check_output(cmd, stderr=subprocess.PIPE, env=currenv,
                                    # start new session to avoid hangs
                                    start_new_session=True,
                                    universal_newlines=True)
    except (subprocess.CalledProcessError, FileNotFoundError):
        if not safe:
            raise
        # BUGFIX: callers (e.g. source_foreign) test ``fsenv is None``;
        # the original ``return None, None`` tuple slipped past that check.
        return None
    finally:
        # BUGFIX: only remove the tmpfile when one was actually created;
        # the original removed unconditionally (NameError without tmpfile).
        if use_tmpfile:
            os.remove(tmpfile.name)
    env = parse_env(s)
    return env
Converts to a boolean in a semantically meaningful way.
def to_bool(x):
    """Converts to a boolean in a semantically meaningful way.

    bools pass through unchanged; strings are False only when their
    lowercased form is listed in _FALSES; everything else uses bool(x).
    (Also fixes the original's quadruple-quote docstring typo.)
    """
    if isinstance(x, bool):
        return x
    if isinstance(x, str):
        # membership test already yields the boolean we want
        return x.lower() not in _FALSES
    return bool(x)
Parses the environment portion of string into a dict.
def parse_env(s):
    """Parses the environment portion of string into a dict."""
    match = ENV_RE.search(s)
    if match is None:
        # no environment section present in the output
        return {}
    # key/value pairs are extracted from the first captured group
    return dict(ENV_SPLIT_RE.findall(match.group(1)))
Finds kernel specs from conda environments
env_data is a structure {name -> (resourcedir, kernel spec)}
def get_conda_env_data(mgr):
    """Finds kernel specs from conda environments.

    env_data is a structure {name -> (resourcedir, kernel spec)}
    """
    if not mgr.find_conda_envs:
        return {}
    mgr.log.debug("Looking for conda environments in %s...", mgr.conda_env_dirs)
    # Candidate env paths come from the configured dirs plus conda itself;
    # deduplicate before scanning.
    env_paths = find_env_paths_in_basedirs(mgr.conda_env_dirs)
    env_paths.extend(_find_conda_env_paths_from_conda(mgr))
    env_paths = list(set(env_paths))
    # Always scan for python kernels; scan for R kernels when configured.
    scans = [("python", validate_IPykernel, "")]
    if mgr.find_r_envs:
        scans.append(("R", validate_IRkernel, "r_"))
    env_data = {}
    for language, validator, prefix in scans:
        mgr.log.debug("Scanning conda environments for %s kernels...", language)
        env_data.update(convert_to_env_data(mgr=mgr,
                                            env_paths=env_paths,
                                            validator_func=validator,
                                            activate_func=_get_env_vars_for_conda_env,
                                            name_template=mgr.conda_prefix_template,
                                            display_name_template=mgr.display_name_template,
                                            name_prefix=prefix))
    return env_data
Returns a list of paths as given by `conda env list --json`.
Returns empty list, if conda couldn't be called.
def _find_conda_env_paths_from_conda(mgr):
    """Returns a list of paths as given by `conda env list --json`.

    Returns an empty list if conda couldn't be called.
    """
    # this is expensive, so make it configurable...
    if not mgr.use_conda_directly:
        return []
    mgr.log.debug("Looking for conda environments by calling conda directly...")
    import subprocess
    import json
    try:
        proc = subprocess.Popen(
            ['conda', 'env', 'list', '--json'],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE)
        comm = proc.communicate()
        output = comm[0].decode()
        if proc.returncode != 0 or len(output) == 0:
            mgr.log.error(
                "Couldn't call 'conda' to get the environments. "
                "Output:\n%s", str(comm))
            return []
    except FileNotFoundError:
        mgr.log.error("'conda' not found in path.")
        return []
    return json.loads(output)["envs"]
Check the name of the environment against the black list and the
whitelist. If a whitelist is specified only it is checked.
def validate_env(self, envname):
    """
    Check the name of the environment against the black list and the
    whitelist. If a whitelist is specified only it is checked.
    """
    if self.whitelist_envs:
        # Whitelist mode: only explicitly whitelisted names pass.
        return envname in self.whitelist_envs
    if self.blacklist_envs:
        # Blacklist mode: anything not blacklisted passes.
        return envname not in self.blacklist_envs
    # No filtering configured: accept everything.
    return True
Get the data about the available environments.
env_data is a structure {name -> (resourcedir, kernel spec)}
def _get_env_data(self, reload=False):
    """Get the data about the available environments.

    env_data is a structure {name -> (resourcedir, kernel spec)}

    :param reload: when True, bypass the cache and rescan all suppliers.
    """
    # This is called much too often and the finding-process is really
    # expensive :-( -- so cache the result on the instance.
    if not reload and getattr(self, "_env_data_cache", {}):
        return getattr(self, "_env_data_cache")
    env_data = {}
    for supplyer in ENV_SUPPLYER:
        env_data.update(supplyer(self))
    env_data = {name: env_data[name] for name in env_data if self.validate_env(name)}
    # BUGFIX: use getattr with a default here as well -- on the very first
    # call the cache attribute may not exist yet (the guard above already
    # treats it as optional) and direct access would raise AttributeError.
    old_cache = getattr(self, "_env_data_cache", {})
    new_kernels = [env for env in env_data if env not in old_cache]
    if new_kernels:
        self.log.info("Found new kernels in environments: %s", ", ".join(new_kernels))
    self._env_data_cache = env_data
    return env_data
Returns a dict mapping kernel names to resource directories.
def find_kernel_specs_for_envs(self):
    """Returns a dict mapping kernel names to resource directories."""
    # env_data values are (resource_dir, kernel_spec) pairs; keep the dirs.
    return {name: resource_dir
            for name, (resource_dir, _spec) in self._get_env_data().items()}
Returns the dict of name -> kernel_spec for all environments
def get_all_kernel_specs_for_envs(self):
    """Returns the dict of name -> kernel_spec for all environments"""
    # env_data values are (resource_dir, kernel_spec) pairs; keep the specs.
    return {name: spec
            for name, (_resource_dir, spec) in self._get_env_data().items()}
Returns a dict mapping kernel names to resource directories.
def find_kernel_specs(self):
    """Returns a dict mapping kernel names to resource directories."""
    # Let real installed kernels overwrite env kernels with the same name:
    # this mirrors get_kernel_spec, which also prefers kernels from the
    # jupyter dir over env kernels.
    combined = dict(self.find_kernel_specs_for_envs())
    combined.update(super(EnvironmentKernelSpecManager, self).find_kernel_specs())
    return combined
Returns a dict mapping kernel names and resource directories.
def get_all_specs(self):
    """Returns a dict mapping kernel names and resource directories.
    """
    # This is new in 4.1 -> https://github.com/jupyter/jupyter_client/pull/93
    # Real installed kernels take precedence over env kernels of the same name.
    combined = dict(self.get_all_kernel_specs_for_envs())
    combined.update(super(EnvironmentKernelSpecManager, self).get_all_specs())
    return combined
Returns a :class:`KernelSpec` instance for the given kernel_name.
Raises :exc:`NoSuchKernel` if the given kernel name is not found.
def get_kernel_spec(self, kernel_name):
    """Returns a :class:`KernelSpec` instance for the given kernel_name.

    Raises :exc:`NoSuchKernel` if the given kernel name is not found.
    """
    try:
        return super(EnvironmentKernelSpecManager,
                     self).get_kernel_spec(kernel_name)
    except (NoSuchKernel, FileNotFoundError):
        # Fall back to env kernels; their names are stored lowercased.
        env_specs = self.get_all_kernel_specs_for_envs()
        lowered = kernel_name.lower()
        if lowered not in env_specs:
            raise NoSuchKernel(kernel_name)
        return env_specs[lowered]
Transliterate serbian cyrillic string of characters to latin string of characters.
:param string_to_transliterate: The cyrillic string to transliterate into latin characters.
:param lang_code: Indicates the cyrillic language code we are translating from. Defaults to Serbian (sr).
:return: A string of latin characters transliterated from the given cyrillic string.
def to_latin(string_to_transliterate, lang_code='sr'):
    ''' Transliterate serbian cyrillic string of characters to latin string of characters.
    :param string_to_transliterate: The cyrillic string to transliterate into latin characters.
    :param lang_code: Indicates the cyrillic language code we are translating from. Defaults to Serbian (sr).
    :return: A string of latin characters transliterated from the given cyrillic string.
    '''
    lang = lang_code.lower()
    # Unsupported alphabet or missing 'tolatin' table: return input unchanged.
    if lang not in TRANSLIT_DICT or not TRANSLIT_DICT[lang]['tolatin']:
        return string_to_transliterate
    # Character-by-character transliteration table for this language.
    table = TRANSLIT_DICT[lang]['tolatin']
    source = __decode_utf8(string_to_transliterate)
    # Map every cyrillic character through the table; anything not in the
    # table (digits, punctuation, ...) is kept as-is.
    latinized_str = ''.join(table.get(ch, ch) for ch in source)
    return __encode_utf8(latinized_str)
Transliterate serbian latin string of characters to cyrillic string of characters.
:param string_to_transliterate: The latin string to transliterate into cyrillic characters.
:param lang_code: Indicates the cyrillic language code we are translating to. Defaults to Serbian (sr).
:return: A string of cyrillic characters transliterated from the given latin string.
def to_cyrillic(string_to_transliterate, lang_code='sr'):
    ''' Transliterate serbian latin string of characters to cyrillic string of characters.
    :param string_to_transliterate: The latin string to transliterate into cyrillic characters.
    :param lang_code: Indicates the cyrillic language code we are translating to. Defaults to Serbian (sr).
    :return: A string of cyrillic characters transliterated from the given latin string.
    '''
    # First check if we support the cyrillic alphabet we want to transliterate to latin.
    if lang_code.lower() not in TRANSLIT_DICT:
        # If we don't support it, then just return the original string.
        return string_to_transliterate
    # If we do support it, check if the implementation is not missing before proceeding.
    elif not TRANSLIT_DICT[lang_code.lower()]['tocyrillic']:
        return string_to_transliterate
    else:
        # Get the character per character transliteration dictionary
        transliteration_dict = TRANSLIT_DICT[lang_code.lower()]['tocyrillic']
        # Initialize the output cyrillic string variable
        cyrillic_str = ''
        string_to_transliterate = __decode_utf8(string_to_transliterate)
        # Transliterate by traversing the inputted string character by character.
        length_of_string_to_transliterate = len(string_to_transliterate)
        index = 0
        while index < length_of_string_to_transliterate:
            # Grab a character from the string at the current index
            c = string_to_transliterate[index]
            # Watch out for Lj and lj. Don't want to interpret Lj/lj as L/l and j.
            # Watch out for Nj and nj. Don't want to interpret Nj/nj as N/n and j.
            # Watch out for Dž and dž. Don't want to interpret Dž/dž as D/d and ž.
            # Peek at the next character (empty when at the last index).
            c_plus_1 = u''
            if index != length_of_string_to_transliterate - 1:
                c_plus_1 = string_to_transliterate[index + 1]
            # Detect two-letter (digraph) sequences that map to a single
            # cyrillic character; when found, consume both input characters.
            if ((c == u'L' or c == u'l') and c_plus_1 == u'j') or \
               ((c == u'N' or c == u'n') and c_plus_1 == u'j') or \
               ((c == u'D' or c == u'd') and c_plus_1 == u'ž') or \
               (lang_code == 'mk' and (c == u'D' or c == u'd') and c_plus_1 == u'z') or \
               (lang_code == 'ru' and (
                   (c in u'Cc' and c_plus_1 in u'Hh') or  # c, ch
                   (c in u'Ee' and c_plus_1 in u'Hh') or  # eh
                   (c == u'i' and c_plus_1 == u'y' and
                    string_to_transliterate[index + 2:index + 3] not in u'aou') or  # iy[^AaOoUu]
                   (c in u'Jj' and c_plus_1 in u'UuAaEe') or  # j, ju, ja, je
                   (c in u'Ss' and c_plus_1 in u'HhZz') or  # s, sh, sz
                   (c in u'Yy' and c_plus_1 in u'AaOoUu') or  # y, ya, yo, yu
                   (c in u'Zz' and c_plus_1 in u'Hh')  # z, zh
               )):
                index += 1
                c += c_plus_1
            # If character is in dictionary, it means it's a cyrillic so let's transliterate that character.
            if c in transliteration_dict:
                # ay, ey, iy, oy, uy -> terminal 'y' after a vowel becomes й/Й (Russian only)
                if lang_code == 'ru' and c in u'Yy' and \
                        cyrillic_str and cyrillic_str[-1].lower() in u"аеиоуэя":
                    cyrillic_str += u"й" if c == u'y' else u"Й"
                else:
                    # Transliterate current character.
                    cyrillic_str += transliteration_dict[c]
            # If character is not in character transliteration dictionary,
            # it is most likely a number or a special character so just keep it.
            else:
                cyrillic_str += c
            index += 1
        return __encode_utf8(cyrillic_str)
Parses the incoming bytestream as XML and returns the resulting data.
def parse(self, stream, media_type=None, parser_context=None):
    """Parse the incoming bytestream as XML and return the resulting data.

    Raises ParseError when the stream is not well-formed XML.
    """
    assert etree, 'XMLParser requires defusedxml to be installed'
    context = parser_context or {}
    charset = context.get('encoding', settings.DEFAULT_CHARSET)
    xml_parser = etree.DefusedXMLParser(encoding=charset)
    try:
        document = etree.parse(stream, parser=xml_parser, forbid_dtd=True)
    except (etree.ParseError, ValueError) as exc:
        raise ParseError('XML parse error - %s' % six.text_type(exc))
    return self._xml_convert(document.getroot())
convert the xml `element` into the corresponding python object
def _xml_convert(self, element):
    """Convert the xml `element` into the corresponding python object.

    Leaf elements go through `_type_convert`; children tagged
    'list-item' become a list; any other children become a dict
    keyed by child tag.
    """
    children = list(element)
    if not children:
        return self._type_convert(element.text)
    # If the first child tag is list-item, all children are list-items.
    if children[0].tag == "list-item":
        return [self._xml_convert(child) for child in children]
    return {child.tag: self._xml_convert(child) for child in children}
Converts the value returned by the XMl parse into the equivalent
Python type
def _type_convert(self, value):
"""
Converts the value returned by the XMl parse into the equivalent
Python type
"""
if value is None:
return value
try:
return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
except ValueError:
pass
try:
return int(value)
except ValueError:
pass
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
pass
return value |
Renders `data` into serialized XML.
def render(self, data, accepted_media_type=None, renderer_context=None):
    """Render `data` into serialized XML.

    Returns an empty string when there is nothing to render.
    """
    if data is None:
        return ''
    buffer = StringIO()
    generator = SimplerXMLGenerator(buffer, self.charset)
    generator.startDocument()
    generator.startElement(self.root_tag_name, {})
    self._to_xml(generator, data)
    generator.endElement(self.root_tag_name)
    generator.endDocument()
    return buffer.getvalue()
Open a connection to the device.
def open(self):
    """Open a connection to the device."""
    if self.transport == 'telnet':
        device_type = 'cisco_ios_telnet'
    else:
        device_type = 'cisco_ios'
    self.device = ConnectHandler(device_type=device_type,
                                 host=self.hostname,
                                 username=self.username,
                                 password=self.password,
                                 **self.netmiko_optional_args)
    # Make sure the session is in enable mode before any commands run.
    self.device.enable()
Write temp file and for use with inline config and SCP.
def _create_tmp_file(config):
"""Write temp file and for use with inline config and SCP."""
tmp_dir = tempfile.gettempdir()
rand_fname = py23_compat.text_type(uuid.uuid4())
filename = os.path.join(tmp_dir, rand_fname)
with open(filename, 'wt') as fobj:
fobj.write(config)
return filename |
Transfer file to remote device for either merge or replace operations
Returns (return_status, msg)
def _load_candidate_wrapper(self, source_file=None, source_config=None, dest_file=None,
                            file_system=None):
    """
    Transfer file to remote device for either merge or replace operations.

    Exactly one of `source_file` (local file path) or `source_config`
    (configuration text) must be provided.

    Returns (return_status, msg)
    """
    return_status = False
    msg = ''
    if source_file and source_config:
        raise ValueError("Cannot simultaneously set source_file and source_config")
    if source_config:
        if self.inline_transfer:
            (return_status, msg) = self._inline_tcl_xfer(source_config=source_config,
                                                         dest_file=dest_file,
                                                         file_system=file_system)
        else:
            # Use SCP: stage the config text in a local temp file first.
            tmp_file = self._create_tmp_file(source_config)
            try:
                (return_status, msg) = self._scp_file(source_file=tmp_file, dest_file=dest_file,
                                                      file_system=file_system)
            finally:
                # Bug fix: always remove the temp file, even when the SCP
                # transfer raises; previously an exception leaked the file.
                if tmp_file and os.path.isfile(tmp_file):
                    os.remove(tmp_file)
    if source_file:
        if self.inline_transfer:
            (return_status, msg) = self._inline_tcl_xfer(source_file=source_file,
                                                         dest_file=dest_file,
                                                         file_system=file_system)
        else:
            (return_status, msg) = self._scp_file(source_file=source_file, dest_file=dest_file,
                                                  file_system=file_system)
    if not return_status:
        if msg == '':
            msg = "Transfer to remote device failed"
    return (return_status, msg)
SCP file to device filesystem, defaults to candidate_config.
Return None or raise exception
def load_replace_candidate(self, filename=None, config=None):
    """SCP file to device filesystem, defaults to candidate_config.

    Return None or raise ReplaceConfigException on transfer failure.
    """
    self.config_replace = True
    ok, msg = self._load_candidate_wrapper(source_file=filename,
                                           source_config=config,
                                           dest_file=self.candidate_cfg,
                                           file_system=self.dest_file_system)
    if not ok:
        raise ReplaceConfigException(msg)
SCP file to remote device.
Merge configuration in: copy <file> running-config
def load_merge_candidate(self, filename=None, config=None):
    """SCP file to remote device for a merge operation.

    Merge configuration in: copy <file> running-config
    Raises MergeConfigException on transfer failure.
    """
    self.config_replace = False
    ok, msg = self._load_candidate_wrapper(source_file=filename,
                                           source_config=config,
                                           dest_file=self.merge_cfg,
                                           file_system=self.dest_file_system)
    if not ok:
        raise MergeConfigException(msg)
Special handler for hostname change on commit operation.
def _commit_hostname_handler(self, cmd):
"""Special handler for hostname change on commit operation."""
current_prompt = self.device.find_prompt().strip()
terminating_char = current_prompt[-1]
pattern = r"[>#{}]\s*$".format(terminating_char)
# Look exclusively for trailing pattern that includes '#' and '>'
output = self.device.send_command_expect(cmd, expect_string=pattern)
# Reset base prompt in case hostname changed
self.device.set_base_prompt()
return output |
If replacement operation, perform 'configure replace' for the entire config.
If merge operation, perform copy <file> running-config.
def commit_config(self):
    """
    If replacement operation, perform 'configure replace' for the entire config.
    If merge operation, perform copy <file> running-config.

    Raises ReplaceConfigException or MergeConfigException if the device
    rejects the candidate. On success the running config is saved to
    startup ('write mem') for both operation types.
    """
    # Always generate a rollback config on commit
    self._gen_rollback_cfg()
    if self.config_replace:
        # Replace operation
        filename = self.candidate_cfg
        cfg_file = self._gen_full_path(filename)
        if not self._check_file_exists(cfg_file):
            raise ReplaceConfigException("Candidate config file does not exist")
        if self.auto_rollback_on_error:
            # 'revert trigger error' makes IOS roll back automatically if the
            # replace fails part-way through.
            cmd = 'configure replace {} force revert trigger error'.format(cfg_file)
        else:
            cmd = 'configure replace {} force'.format(cfg_file)
        output = self._commit_hostname_handler(cmd)
        # Any restore/error/failure wording means the replace did not apply.
        if ('original configuration has been successfully restored' in output) or \
                ('error' in output.lower()) or \
                ('failed' in output.lower()):
            msg = "Candidate config could not be applied\n{}".format(output)
            raise ReplaceConfigException(msg)
        elif '%Please turn config archive on' in output:
            # 'configure replace' depends on the IOS archive feature.
            msg = "napalm-ios replace() requires Cisco 'archive' feature to be enabled."
            raise ReplaceConfigException(msg)
    else:
        # Merge operation
        filename = self.merge_cfg
        cfg_file = self._gen_full_path(filename)
        if not self._check_file_exists(cfg_file):
            raise MergeConfigException("Merge source config file does not exist")
        cmd = 'copy {} running-config'.format(cfg_file)
        # Suppress the interactive destination-filename confirmation prompt.
        self._disable_confirm()
        output = self._commit_hostname_handler(cmd)
        self._enable_confirm()
        if 'Invalid input detected' in output:
            # Merge introduced a bad line: attempt automatic rollback.
            self.rollback()
            err_header = "Configuration merge failed; automatic rollback attempted"
            merge_error = "{0}:\n{1}".format(err_header, output)
            raise MergeConfigException(merge_error)
    # Save config to startup (both replace and merge)
    # (output is accumulated but this method returns None)
    output += self.device.send_command_expect("write mem")
Set candidate_cfg to current running-config. Erase the merge_cfg file.
def discard_config(self):
    """Set candidate_cfg to current running-config. Erase the merge_cfg file."""
    candidate_path = self._gen_full_path(self.candidate_cfg)
    merge_path = self._gen_full_path(self.merge_cfg)
    self._disable_confirm()
    # Overwrite the candidate file with the current running config...
    self.device.send_command_expect('copy running-config {}'.format(candidate_path))
    # ...and blank out the merge file.
    self.device.send_command_expect('copy null: {}'.format(merge_path))
    self._enable_confirm()
Transfer file to remote device.
By default, this will use Secure Copy if self.inline_transfer is set, then will use
Netmiko InlineTransfer method to transfer inline using either SSH or telnet (plus TCL
onbox).
Return (status, msg)
status = boolean
msg = details on what happened
def _xfer_file(self, source_file=None, source_config=None, dest_file=None, file_system=None,
               TransferClass=FileTransfer):
    """Transfer file to remote device.

    By default this uses Secure Copy; if self.inline_transfer is set, the
    Netmiko InlineTransfer method is used instead (over SSH or telnet,
    plus TCL on-box).

    Return (status, msg)
    status = boolean
    msg = details on what happened
    """
    if not source_file and not source_config:
        raise ValueError("File source not specified for transfer.")
    if not dest_file or not file_system:
        raise ValueError("Destination file or file system not specified.")
    kwargs = dict(ssh_conn=self.device, dest_file=dest_file, direction='put',
                  file_system=file_system)
    if source_file:
        kwargs['source_file'] = source_file
    elif source_config:
        kwargs['source_config'] = source_config
    use_scp = not self.inline_transfer
    with TransferClass(**kwargs) as transfer:
        # Skip the transfer entirely when an identical file is already there.
        if transfer.check_file_exists() and transfer.compare_md5():
            return (True, "File already exists and has correct MD5: no SCP needed")
        if not transfer.verify_space_available():
            return (False, "Insufficient space available on remote device")
        if use_scp:
            transfer.enable_scp()
        # Transfer file
        transfer.transfer_file()
        # Compare MD5 between local and remote files.
        if transfer.verify_file():
            return (True, "File successfully transferred to remote device")
        return (False, "File transfer to remote device failed")
    return (False, '')
Generate full file path on remote device.
def _gen_full_path(self, filename, file_system=None):
"""Generate full file path on remote device."""
if file_system is None:
return '{}/{}'.format(self.dest_file_system, filename)
else:
if ":" not in file_system:
raise ValueError("Invalid file_system specified: {}".format(file_system))
return '{}/{}'.format(file_system, filename) |
Save a configuration that can be used for rollback.
def _gen_rollback_cfg(self):
    """Save a configuration that can be used for rollback."""
    rollback_path = self._gen_full_path(self.rollback_cfg)
    self._disable_confirm()
    # Snapshot the running config into the rollback file on the device.
    self.device.send_command_expect('copy running-config {}'.format(rollback_path))
    self._enable_confirm()
Check that the file exists on remote device using full path.
cfg_file is full path i.e. flash:/file_name
For example
# dir flash:/candidate_config.txt
Directory of flash:/candidate_config.txt
33 -rw- 5592 Dec 18 2015 10:50:22 -08:00 candidate_config.txt
return boolean
def _check_file_exists(self, cfg_file):
"""
Check that the file exists on remote device using full path.
cfg_file is full path i.e. flash:/file_name
For example
# dir flash:/candidate_config.txt
Directory of flash:/candidate_config.txt
33 -rw- 5592 Dec 18 2015 10:50:22 -08:00 candidate_config.txt
return boolean
"""
cmd = 'dir {}'.format(cfg_file)
success_pattern = 'Directory of {}'.format(cfg_file)
output = self.device.send_command_expect(cmd)
if 'Error opening' in output:
return False
elif success_pattern in output:
return True
return False |
Obtain the full interface name from the abbreviated name.
Cache mappings in self.interface_map.
def _expand_interface_name(self, interface_brief):
"""
Obtain the full interface name from the abbreviated name.
Cache mappings in self.interface_map.
"""
if self.interface_map.get(interface_brief):
return self.interface_map.get(interface_brief)
command = 'show int {}'.format(interface_brief)
output = self._send_command(command)
first_line = output.splitlines()[0]
if 'line protocol' in first_line:
full_int_name = first_line.split()[0]
self.interface_map[interface_brief] = full_int_name
return self.interface_map.get(interface_brief)
else:
return interface_brief |
IOS implementation of get_lldp_neighbors.
def get_lldp_neighbors(self):
    """IOS implementation of get_lldp_neighbors.

    Parses 'show lldp neighbors' and returns a dict of
    {local_port: [{'hostname': ..., 'port': ...}, ...]}.
    Returns {} when the device does not support LLDP or has no entries.
    """
    lldp = {}
    command = 'show lldp neighbors'
    output = self._send_command(command)
    # Check if router supports the command
    if '% Invalid input' in output:
        return {}
    # Process the output to obtain just the LLDP entries (between the
    # 'Device ID' header and the 'Total entries displayed' trailer).
    try:
        split_output = re.split(r'^Device ID.*$', output, flags=re.M)[1]
        split_output = re.split(r'^Total entries displayed.*$', split_output, flags=re.M)[0]
    except IndexError:
        return {}
    split_output = split_output.strip()
    for lldp_entry in split_output.splitlines():
        # Example, twb-sf-hpsw1    Fa4   120   B   17
        try:
            device_id, local_int_brief, hold_time, capability, remote_port = lldp_entry.split()
        except ValueError:
            if len(lldp_entry.split()) == 4:
                # Four fields might be long_name or missing capability
                # NOTE(review): assumes column 46 falls inside the fixed-width
                # Capability column of the IOS output -- confirm layout.
                capability_missing = True if lldp_entry[46] == ' ' else False
                if capability_missing:
                    device_id, local_int_brief, hold_time, remote_port = lldp_entry.split()
                else:
                    # Might be long_name issue: a >20-char device name is fused
                    # with the local interface field.
                    tmp_field, hold_time, capability, remote_port = lldp_entry.split()
                    device_id = tmp_field[:20]
                    local_int_brief = tmp_field[20:]
                    # device_id might be abbreviated, try to get full name
                    lldp_tmp = self._lldp_detail_parser(local_int_brief)
                    device_id_new = lldp_tmp[3][0]
                    # Verify abbreviated and full name are consistent
                    if device_id_new[:20] == device_id:
                        device_id = device_id_new
                    else:
                        raise ValueError("Unable to obtain remote device name")
        local_port = self._expand_interface_name(local_int_brief)
        entry = {'port': remote_port, 'hostname': device_id}
        lldp.setdefault(local_port, [])
        lldp[local_port].append(entry)
    return lldp
IOS implementation of get_lldp_neighbors_detail.
Calls get_lldp_neighbors.
def get_lldp_neighbors_detail(self, interface=''):
    """
    IOS implementation of get_lldp_neighbors_detail.

    Calls get_lldp_neighbors, then gathers per-port detail via
    _lldp_detail_parser. Pass `interface` to restrict the result to a
    single local port (empty dict if that port has no neighbors).
    """
    lldp = {}
    lldp_neighbors = self.get_lldp_neighbors()
    # Filter to specific interface
    if interface:
        lldp_data = lldp_neighbors.get(interface)
        if lldp_data:
            lldp_neighbors = {interface: lldp_data}
        else:
            lldp_neighbors = {}
    for interface in lldp_neighbors:
        local_port = interface
        # lldp_fields is a list of per-attribute lists, one value per neighbor.
        lldp_fields = self._lldp_detail_parser(interface)
        # Convert any 'not advertised' to 'N/A'
        for field in lldp_fields:
            for i, value in enumerate(field):
                if 'not advertised' in value:
                    field[i] = 'N/A'
        number_entries = len(lldp_fields[0])
        # re.findall will return a list. Make sure same number of entries always returned.
        for test_list in lldp_fields:
            if len(test_list) != number_entries:
                raise ValueError("Failure processing show lldp neighbors detail")
        # Standardize the fields
        port_id, port_description, chassis_id, system_name, system_description, \
            system_capabilities, enabled_capabilities, remote_address = lldp_fields
    # Re-group attribute lists into one tuple per neighbor.
        standardized_fields = zip(port_id, port_description, chassis_id, system_name,
                                  system_description, system_capabilities,
                                  enabled_capabilities, remote_address)
        lldp.setdefault(local_port, [])
        for entry in standardized_fields:
            remote_port_id, remote_port_description, remote_chassis_id, remote_system_name, \
                remote_system_description, remote_system_capab, remote_enabled_capab, \
                remote_mgmt_address = entry
            # NOTE(review): remote_mgmt_address is parsed but not included in
            # the returned dict -- apparently intentional; confirm against the
            # expected getter schema.
            lldp[local_port].append({
                'parent_interface': u'N/A',
                'remote_port': remote_port_id,
                'remote_port_description': remote_port_description,
                'remote_chassis_id': remote_chassis_id,
                'remote_system_name': remote_system_name,
                'remote_system_description': remote_system_description,
                'remote_system_capab': remote_system_capab,
                'remote_system_enable_capab': remote_enabled_capab})
    return lldp
Return a set of facts from the devices.
def get_facts(self):
    """Return a set of facts from the device.

    Facts returned: uptime, vendor, os_version, serial_number, model,
    hostname, fqdn and interface_list. Unparseable values stay at their
    defaults ('Unknown' / -1).
    """
    # default values.
    vendor = u'Cisco'
    uptime = -1
    serial_number, fqdn, os_version, hostname, domain_name = ('Unknown',) * 5
    # obtain output from device
    show_ver = self._send_command('show version')
    show_hosts = self._send_command('show hosts')
    show_ip_int_br = self._send_command('show ip interface brief')
    # uptime/serial_number/IOS version
    for line in show_ver.splitlines():
        if ' uptime is ' in line:
            hostname, uptime_str = line.split(' uptime is ')
            uptime = self.parse_uptime(uptime_str)
            hostname = hostname.strip()
        if 'Processor board ID' in line:
            _, serial_number = line.split("Processor board ID ")
            serial_number = serial_number.strip()
        if re.search(r"Cisco IOS Software", line):
            try:
                _, os_version = line.split("Cisco IOS Software, ")
            except ValueError:
                # Handle 'Cisco IOS Software [Denali],'
                _, os_version = re.split(r"Cisco IOS Software \[.*?\], ", line)
            os_version = os_version.strip()
        elif re.search(r"IOS \(tm\).+Software", line):
            # Bug fix: '(tm)' must be escaped -- unescaped it is a regex
            # group matching 'tm', so the legacy 'IOS (tm)' banner on older
            # IOS never matched and os_version stayed 'Unknown'.
            _, os_version = line.split("IOS (tm) ")
            os_version = os_version.strip()
    # Determine domain_name and fqdn
    for line in show_hosts.splitlines():
        if 'Default domain' in line:
            _, domain_name = line.split("Default domain is ")
            domain_name = domain_name.strip()
            break
    if domain_name != 'Unknown' and hostname != 'Unknown':
        fqdn = u'{}.{}'.format(hostname, domain_name)
    # model filter
    try:
        match_model = re.search(r"Cisco (.+?) .+bytes of", show_ver, flags=re.IGNORECASE)
        model = match_model.group(1)
    except AttributeError:
        model = u'Unknown'
    # interface_list filter
    interface_list = []
    show_ip_int_br = show_ip_int_br.strip()
    for line in show_ip_int_br.splitlines():
        # Skip the column header row.
        if 'Interface ' in line:
            continue
        interface = line.split()[0]
        interface_list.append(interface)
    return {
        'uptime': uptime,
        'vendor': vendor,
        'os_version': py23_compat.text_type(os_version),
        'serial_number': py23_compat.text_type(serial_number),
        'model': py23_compat.text_type(model),
        'hostname': py23_compat.text_type(hostname),
        'fqdn': fqdn,
        'interface_list': interface_list
    }
Get interface details.
last_flapped is not implemented
Example Output:
{ u'Vlan1': { 'description': u'N/A',
'is_enabled': True,
'is_up': True,
'last_flapped': -1.0,
'mac_address': u'a493.4cc1.67a7',
'speed': 100},
u'Vlan100': { 'description': u'Data Network',
'is_enabled': True,
'is_up': True,
'last_flapped': -1.0,
'mac_address': u'a493.4cc1.67a7',
'speed': 100},
u'Vlan200': { 'description': u'Voice Network',
'is_enabled': True,
'is_up': True,
'last_flapped': -1.0,
'mac_address': u'a493.4cc1.67a7',
'speed': 100}}
def get_interfaces(self):
    """
    Get interface details from 'show interfaces'.

    last_flapped is not implemented (always -1.0); speed is normalized
    to Mbit/s from the BW field.

    Example Output:
    { u'Vlan1': { 'description': u'N/A',
                  'is_enabled': True,
                  'is_up': True,
                  'last_flapped': -1.0,
                  'mac_address': u'a493.4cc1.67a7',
                  'speed': 100},
      u'Vlan100': { 'description': u'Data Network',
                    'is_enabled': True,
                    'is_up': True,
                    'last_flapped': -1.0,
                    'mac_address': u'a493.4cc1.67a7',
                    'speed': 100}}
    """
    # default values.
    last_flapped = -1.0
    command = 'show interfaces'
    output = self._send_command(command)
    interface = description = mac_address = speed = speedformat = ''
    is_enabled = is_up = None
    interface_dict = {}
    for line in output.splitlines():
        # Interface header: 'GigabitEthernet0/1 is up, line protocol is up'
        # or the short form without the protocol field.
        interface_regex_1 = r"^(\S+?)\s+is\s+(.+?),\s+line\s+protocol\s+is\s+(\S+)"
        interface_regex_2 = r"^(\S+)\s+is\s+(up|down)"
        for pattern in (interface_regex_1, interface_regex_2):
            interface_match = re.search(pattern, line)
            if interface_match:
                interface = interface_match.group(1)
                status = interface_match.group(2)
                try:
                    protocol = interface_match.group(3)
                except IndexError:
                    protocol = ''
                # 'administratively down' means disabled by configuration.
                is_enabled = 'admin' not in status.lower()
                if protocol:
                    is_up = bool('up' in protocol)
                else:
                    is_up = bool('up' in status)
                break
        # Search each regex once instead of twice (was: if re.search(...)
        # followed by a second identical re.search to get the match object).
        mac_addr_regex = r"^\s+Hardware.+address\s+is\s+({})".format(MAC_REGEX)
        mac_addr_match = re.search(mac_addr_regex, line)
        if mac_addr_match:
            mac_address = napalm_base.helpers.mac(mac_addr_match.groups()[0])
        # Bug fix: raw string -- '\s' inside a non-raw literal is an invalid
        # escape sequence (DeprecationWarning on Python 3.6+).
        descr_regex = r"^\s+Description:\s+(.+?)$"
        descr_match = re.search(descr_regex, line)
        if descr_match:
            description = descr_match.groups()[0]
        speed_regex = r"^\s+MTU\s+\d+.+BW\s+(\d+)\s+([KMG]?b)"
        speed_match = re.search(speed_regex, line)
        if speed_match:
            speed = speed_match.groups()[0]
            speedformat = speed_match.groups()[1]
            speed = float(speed)
            # Normalize the BW value to Mbit/s.
            if speedformat.startswith('Kb'):
                speed = speed / 1000.0
            elif speedformat.startswith('Gb'):
                speed = speed * 1000
            speed = int(round(speed))
            # The MTU/BW line marks the end of the attributes we collect for
            # the current interface, so emit the record here.
            if interface == '':
                # Bug fix: the original message was split across a line
                # continuation and contained a long run of embedded spaces.
                raise ValueError("Interface attributes were found without any known interface")
            if not isinstance(is_up, bool) or not isinstance(is_enabled, bool):
                raise ValueError("Did not correctly find the interface status")
            interface_dict[interface] = {'is_enabled': is_enabled, 'is_up': is_up,
                                         'description': description, 'mac_address': mac_address,
                                         'last_flapped': last_flapped, 'speed': speed}
            interface = description = mac_address = speed = speedformat = ''
            is_enabled = is_up = None
    return interface_dict
Get interface ip details.
Returns a dict of dicts
Example Output:
{ u'FastEthernet8': { 'ipv4': { u'10.66.43.169': { 'prefix_length': 22}}},
u'Loopback555': { 'ipv4': { u'192.168.1.1': { 'prefix_length': 24}},
'ipv6': { u'1::1': { 'prefix_length': 64},
u'2001:DB8:1::1': { 'prefix_length': 64},
u'2::': { 'prefix_length': 64},
u'FE80::3': { 'prefix_length': 10}}},
u'Tunnel0': { 'ipv4': { u'10.63.100.9': { 'prefix_length': 24}}},
u'Tunnel1': { 'ipv4': { u'10.63.101.9': { 'prefix_length': 24}}},
u'Vlan100': { 'ipv4': { u'10.40.0.1': { 'prefix_length': 24},
u'10.41.0.1': { 'prefix_length': 24},
u'10.65.0.1': { 'prefix_length': 24}}},
u'Vlan200': { 'ipv4': { u'10.63.176.57': { 'prefix_length': 29}}}}
def get_interfaces_ip(self):
    """
    Get interface ip details from 'show ip interface' and
    'show ipv6 interface'.

    Returns a dict of dicts; interfaces without IPv6 simply have no
    'ipv6' key, since they do not appear in 'show ipv6 interface'.

    Example Output:
    { u'FastEthernet8': { 'ipv4': { u'10.66.43.169': { 'prefix_length': 22}}},
      u'Loopback555': { 'ipv4': { u'192.168.1.1': { 'prefix_length': 24}},
                        'ipv6': { u'1::1': { 'prefix_length': 64},
                                  u'2001:DB8:1::1': { 'prefix_length': 64},
                                  u'2::': { 'prefix_length': 64},
                                  u'FE80::3': { 'prefix_length': 10}}},
      u'Tunnel0': { 'ipv4': { u'10.63.100.9': { 'prefix_length': 24}}},
      u'Vlan200': { 'ipv4': { u'10.63.176.57': { 'prefix_length': 29}}}}
    """
    interfaces = {}
    command = 'show ip interface'
    show_ip_interface = self._send_command(command)
    command = 'show ipv6 interface'
    show_ipv6_interface = self._send_command(command)
    # Matches both the primary and any secondary IPv4 addresses.
    INTERNET_ADDRESS = r'\s+(?:Internet address is|Secondary address)'
    INTERNET_ADDRESS += r' (?P<ip>{})/(?P<prefix>\d+)'.format(IPV4_ADDR_REGEX)
    LINK_LOCAL_ADDRESS = r'\s+IPv6 is enabled, link-local address is (?P<ip>[a-fA-F0-9:]+)'
    GLOBAL_ADDRESS = r'\s+(?P<ip>[a-fA-F0-9:]+), subnet is (?:[a-fA-F0-9:]+)/(?P<prefix>\d+)'
    # NOTE(review): redundant re-initialization -- interfaces was already
    # set to {} above.
    interfaces = {}
    for line in show_ip_interface.splitlines():
        if(len(line.strip()) == 0):
            continue
        # Unindented lines start a new interface section.
        if(line[0] != ' '):
            ipv4 = {}
            interface_name = line.split()[0]
        m = re.match(INTERNET_ADDRESS, line)
        if m:
            ip, prefix = m.groups()
            ipv4.update({ip: {"prefix_length": int(prefix)}})
            interfaces[interface_name] = {'ipv4': ipv4}
    for line in show_ipv6_interface.splitlines():
        if(len(line.strip()) == 0):
            continue
        # Unindented lines start a new interface section.
        if(line[0] != ' '):
            ifname = line.split()[0]
            ipv6 = {}
            if ifname not in interfaces:
                interfaces[ifname] = {'ipv6': ipv6}
            else:
                interfaces[ifname].update({'ipv6': ipv6})
        m = re.match(LINK_LOCAL_ADDRESS, line)
        if m:
            ip = m.group(1)
            # Link-local addresses are always reported with a /10 prefix.
            ipv6.update({ip: {"prefix_length": 10}})
        m = re.match(GLOBAL_ADDRESS, line)
        if m:
            ip, prefix = m.groups()
            ipv6.update({ip: {"prefix_length": int(prefix)}})
    # Interface without ipv6 doesn't appears in show ipv6 interface
    return interfaces
Convert string time to seconds.
Examples
00:14:23
00:13:40
00:00:21
00:00:13
00:00:49
1d11h
1d17h
1w0d
8w5d
1y28w
never
def bgp_time_conversion(bgp_uptime):
    """
    Convert a BGP uptime string to seconds.

    Recognized forms: 'never', 'HH:MM:SS' (e.g. 00:14:23), and the
    two-unit forms 1d11h, 8w5d, 1y28w. Returns -1 for 'never'; raises
    ValueError for anything unrecognized.
    """
    uptime = bgp_uptime.strip()
    if 'never' in uptime:
        return -1
    if ':' in uptime:
        hours, minutes, seconds = (int(part) for part in uptime.split(":"))
        return hours * 3600 + minutes * 60 + seconds
    # Check if any letters 'w', 'h', 'd' are in the time string
    if set('whd') & set(uptime):
        two_unit_forms = (
            (r'(\d+)d(\d+)h', DAY_SECONDS, 3600),       # 1d17h
            (r'(\d+)w(\d+)d', WEEK_SECONDS, DAY_SECONDS),   # 8w5d
            (r'(\d+)y(\d+)w', YEAR_SECONDS, WEEK_SECONDS),  # 1y28w
        )
        for regex, big_unit, small_unit in two_unit_forms:
            match = re.search(regex, uptime)
            if match:
                return int(match.group(1)) * big_unit + int(match.group(2)) * small_unit
    raise ValueError("Unexpected value for BGP uptime string: {}".format(uptime))
BGP neighbor information.
Currently no VRF support. Supports both IPv4 and IPv6.
def get_bgp_neighbors(self):
    """BGP neighbor information.

    Currently no VRF support. Supports both IPv4 and IPv6.

    Parses 'show bgp all summary' plus 'show bgp <afi> unicast neighbors'
    and merges both into the napalm get_bgp_neighbors structure under the
    'global' routing table. Raises ValueError when the two outputs cannot
    be reconciled.
    """
    supported_afi = ['ipv4', 'ipv6']
    bgp_neighbor_data = dict()
    bgp_neighbor_data['global'] = {}
    # get summary output from device
    cmd_bgp_all_sum = 'show bgp all summary'
    summary_output = self._send_command(cmd_bgp_all_sum).strip()
    # get neighbor output from device
    neighbor_output = ''
    for afi in supported_afi:
        cmd_bgp_neighbor = 'show bgp %s unicast neighbors' % afi
        neighbor_output += self._send_command(cmd_bgp_neighbor).strip()
        # trailing newline required for parsing
        neighbor_output += "\n"
    # Regular expressions used for parsing BGP summary
    parse_summary = {
        'patterns': [
            # For address family: IPv4 Unicast
            {'regexp': re.compile(r'^For address family: (?P<afi>\S+) '),
             'record': False},
            # Capture router_id and local_as values, e.g.:
            # BGP router identifier 10.0.1.1, local AS number 65000
            {'regexp': re.compile(r'^.* router identifier (?P<router_id>{}), '
                                  r'local AS number (?P<local_as>{})'.format(
                                      IPV4_ADDR_REGEX, ASN_REGEX
                                  )),
             'record': False},
            # Match neighbor summary row, capturing useful details and
            # discarding the 5 columns that we don't care about, e.g.:
            # Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd
            # 10.0.0.2 4 65000 1336020 64337701 1011343614 0 0 8w0d 3143
            {'regexp': re.compile(r'^\*?(?P<remote_addr>({})|({}))'
                                  r'\s+\d+\s+(?P<remote_as>{})(\s+\S+){{5}}\s+'
                                  r'(?P<uptime>(never)|\d+\S+)'
                                  r'\s+(?P<accepted_prefixes>\d+)'.format(
                                      IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX
                                  )),
             'record': True},
            # Same as above, but for peers that are not Established, e.g.:
            # Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd
            # 192.168.0.2 4 65002 0 0 1 0 0 never Active
            {'regexp': re.compile(r'^\*?(?P<remote_addr>({})|({}))'
                                  r'\s+\d+\s+(?P<remote_as>{})(\s+\S+){{5}}\s+'
                                  r'(?P<uptime>(never)|\d+\S+)\s+(?P<state>\D.*)'.format(
                                      IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX
                                  )),
             'record': True},
            # ipv6 peers often break across rows because of the longer peer address,
            # match as above, but in separate expressions, e.g.:
            # Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd
            # 2001:DB8::4
            #  4 65004 9900690 612449 155362939 0 0 26w6d 36391
            {'regexp': re.compile(r'^\*?(?P<remote_addr>({})|({}))'.format(
                IPV4_ADDR_REGEX, IPV6_ADDR_REGEX
            )),
             'record': False},
            {'regexp': re.compile(r'^\s+\d+\s+(?P<remote_as>{})(\s+\S+){{5}}\s+'
                                  r'(?P<uptime>(never)|\d+\S+)'
                                  r'\s+(?P<accepted_prefixes>\d+)'.format(
                                      ASN_REGEX
                                  )),
             'record': True},
            # Same as above, but for peers that are not Established, e.g.:
            # Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd
            # 2001:DB8::3
            #  4 65003 0 0 1 0 0 never Idle (Admin)
            {'regexp': re.compile(r'^\s+\d+\s+(?P<remote_as>{})(\s+\S+){{5}}\s+'
                                  r'(?P<uptime>(never)|\d+\S+)\s+(?P<state>\D.*)'.format(
                                      ASN_REGEX
                                  )),
             'record': True}
        ],
        # fields that should not be "filled down" across table rows
        'no_fill_fields': ['accepted_prefixes', 'state', 'uptime', 'remote_as', 'remote_addr']
    }
    parse_neighbors = {
        'patterns': [
            # Capture BGP neighbor is 10.0.0.2, remote AS 65000, internal link
            {'regexp': re.compile(r'^BGP neighbor is (?P<remote_addr>({})|({})),'
                                  r'\s+remote AS (?P<remote_as>{}).*'.format(
                                      IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX
                                  )),
             'record': False},
            # Capture description
            {'regexp': re.compile(r'^\s+Description: (?P<description>.+)'),
             'record': False},
            # Capture remote_id, e.g.:
            # BGP version 4, remote router ID 10.0.1.2
            {'regexp': re.compile(r'^\s+BGP version \d+, remote router ID '
                                  r'(?P<remote_id>{})'.format(IPV4_ADDR_REGEX)),
             'record': False},
            # Capture AFI and SAFI names, e.g.:
            # For address family: IPv4 Unicast
            {'regexp': re.compile(r'^\s+For address family: (?P<afi>\S+) '),
             'record': False},
            # Capture current sent and accepted prefixes, e.g.:
            # Prefixes Current: 637213 3142 (Consumes 377040 bytes)
            {'regexp': re.compile(r'^\s+Prefixes Current:\s+(?P<sent_prefixes>\d+)\s+'
                                  r'(?P<accepted_prefixes>\d+).*'),
             'record': False},
            # Capture received_prefixes if soft-reconfig is enabled for the peer
            # NOTE(review): '(soft-reconfig)' is an unescaped regex group, so
            # this matches 'Saved soft-reconfig:' rather than the literal
            # 'Saved (soft-reconfig):' output, and the greedy '.+' before the
            # capture would keep only the final digit of the count. As written
            # this pattern likely never matches, so received_prefixes falls
            # back to accepted_prefixes below -- verify against real device
            # output before changing it.
            {'regexp': re.compile(r'^\s+Saved (soft-reconfig):.+(?P<received_prefixes>\d+).*'),
             'record': True},
            # Otherwise, use the following as an end of row marker
            {'regexp': re.compile(r'^\s+Local Policy Denied Prefixes:.+'),
             'record': True}
        ],
        # fields that should not be "filled down" across table rows
        'no_fill_fields': ['received_prefixes', 'accepted_prefixes', 'sent_prefixes']
    }
    # Parse outputs into a list of dicts
    summary_data = []
    summary_data_entry = {}
    for line in summary_output.splitlines():
        # check for matches against each pattern
        for item in parse_summary['patterns']:
            match = item['regexp'].match(line)
            if match:
                # a match was found, so update the temp entry with the match's groupdict
                summary_data_entry.update(match.groupdict())
                if item['record']:
                    # Record indicates the last piece of data has been obtained; move
                    # on to next entry
                    summary_data.append(copy.deepcopy(summary_data_entry))
                    # remove keys that are listed in no_fill_fields before the next pass
                    for field in parse_summary['no_fill_fields']:
                        try:
                            del summary_data_entry[field]
                        except KeyError:
                            pass
                break
    neighbor_data = []
    neighbor_data_entry = {}
    for line in neighbor_output.splitlines():
        # check for matches against each pattern
        for item in parse_neighbors['patterns']:
            match = item['regexp'].match(line)
            if match:
                # a match was found, so update the temp entry with the match's groupdict
                neighbor_data_entry.update(match.groupdict())
                if item['record']:
                    # Record indicates the last piece of data has been obtained; move
                    # on to next entry
                    neighbor_data.append(copy.deepcopy(neighbor_data_entry))
                    # remove keys that are listed in no_fill_fields before the next pass
                    for field in parse_neighbors['no_fill_fields']:
                        try:
                            del neighbor_data_entry[field]
                        except KeyError:
                            pass
                break
    router_id = None
    for entry in summary_data:
        if not router_id:
            router_id = entry['router_id']
        elif entry['router_id'] != router_id:
            # Improvement: give the bare ValueError a diagnostic message.
            raise ValueError("BGP router_id is not consistent across address families")
    # check the router_id looks like an ipv4 address
    router_id = napalm_base.helpers.ip(router_id, version=4)
    # add parsed data to output dict
    bgp_neighbor_data['global']['router_id'] = router_id
    bgp_neighbor_data['global']['peers'] = {}
    for entry in summary_data:
        remote_addr = napalm_base.helpers.ip(entry['remote_addr'])
        afi = entry['afi'].lower()
        # check that we're looking at a supported afi
        if afi not in supported_afi:
            continue
        # get neighbor_entry out of neighbor data
        neighbor_entry = None
        for neighbor in neighbor_data:
            if (neighbor['afi'].lower() == afi and
                    napalm_base.helpers.ip(neighbor['remote_addr']) == remote_addr):
                neighbor_entry = neighbor
                break
        if not isinstance(neighbor_entry, dict):
            # Bug fix: ValueError has no 'msg' keyword -- the original
            # raise ValueError(msg=...) raised TypeError instead.
            raise ValueError("Couldn't find neighbor data for %s in afi %s" %
                             (remote_addr, afi))
        # check for admin down state
        try:
            if "(Admin)" in entry['state']:
                is_enabled = False
            else:
                is_enabled = True
        except KeyError:
            is_enabled = True
        # parse uptime value
        uptime = self.bgp_time_conversion(entry['uptime'])
        # Uptime should be -1 if BGP session not up
        is_up = True if uptime >= 0 else False
        # check whether session is up for address family and get prefix count
        try:
            accepted_prefixes = int(entry['accepted_prefixes'])
        except (ValueError, KeyError):
            accepted_prefixes = -1
        # Only parse neighbor detailed data if BGP session is-up
        if is_up:
            try:
                # override accepted_prefixes with neighbor data if possible (since that's newer)
                accepted_prefixes = int(neighbor_entry['accepted_prefixes'])
            except (ValueError, KeyError):
                pass
            # try to get received prefix count, otherwise set to accepted_prefixes
            received_prefixes = neighbor_entry.get('received_prefixes', accepted_prefixes)
            # try to get sent prefix count and convert to int, otherwise set to -1
            sent_prefixes = int(neighbor_entry.get('sent_prefixes', -1))
        else:
            received_prefixes = -1
            sent_prefixes = -1
        # get description
        try:
            description = py23_compat.text_type(neighbor_entry['description'])
        except KeyError:
            description = ''
        # check the remote router_id looks like an ipv4 address
        remote_id = napalm_base.helpers.ip(neighbor_entry['remote_id'], version=4)
        if remote_addr not in bgp_neighbor_data['global']['peers']:
            bgp_neighbor_data['global']['peers'][remote_addr] = {
                'local_as': napalm_base.helpers.as_number(entry['local_as']),
                'remote_as': napalm_base.helpers.as_number(entry['remote_as']),
                'remote_id': remote_id,
                'is_up': is_up,
                'is_enabled': is_enabled,
                'description': description,
                'uptime': uptime,
                'address_family': {
                    afi: {
                        'received_prefixes': received_prefixes,
                        'accepted_prefixes': accepted_prefixes,
                        'sent_prefixes': sent_prefixes
                    }
                }
            }
        else:
            # found previous data for matching remote_addr, but for different afi
            existing = bgp_neighbor_data['global']['peers'][remote_addr]
            assert afi not in existing['address_family']
            # compare with existing values and croak if they don't match
            assert existing['local_as'] == napalm_base.helpers.as_number(entry['local_as'])
            assert existing['remote_as'] == napalm_base.helpers.as_number(entry['remote_as'])
            assert existing['remote_id'] == remote_id
            assert existing['is_enabled'] == is_enabled
            assert existing['description'] == description
            # merge other values in a sane manner
            existing['is_up'] = existing['is_up'] or is_up
            existing['uptime'] = max(existing['uptime'], uptime)
            existing['address_family'][afi] = {
                'received_prefixes': received_prefixes,
                'accepted_prefixes': accepted_prefixes,
                'sent_prefixes': sent_prefixes
            }
    return bgp_neighbor_data
Get environment facts.
power and fan are currently not implemented
cpu is using 1-minute average
cpu hard-coded to cpu0 (i.e. only a single CPU)
def get_environment(self):
    """
    Get environment facts.
    power and fan are currently not implemented
    cpu is using 1-minute average
    cpu hard-coded to cpu0 (i.e. only a single CPU)
    """
    environment = {}
    cpu_cmd = 'show proc cpu'
    mem_cmd = 'show memory statistics'
    temp_cmd = 'show env temperature status'
    # --- CPU: parse the one-minute average out of 'show proc cpu' ---
    output = self._send_command(cpu_cmd)
    environment.setdefault('cpu', {})
    environment['cpu'][0] = {}
    environment['cpu'][0]['%usage'] = 0.0
    for line in output.splitlines():
        if 'CPU utilization' in line:
            # CPU utilization for five seconds: 2%/0%; one minute: 2%; five minutes: 1%
            cpu_regex = r'^.*one minute: (\d+)%; five.*$'
            match = re.search(cpu_regex, line)
            # NOTE(review): assumes the 'one minute:' field is always present on
            # this line; if the regex misses, match is None and this raises.
            environment['cpu'][0]['%usage'] = float(match.group(1))
            break
    # --- Memory: sum processor-pool and I/O-pool used/free counters ---
    output = self._send_command(mem_cmd)
    for line in output.splitlines():
        if 'Processor' in line:
            # Columns: pool, head, total, used, free (only used/free kept)
            _, _, _, proc_used_mem, proc_free_mem = line.split()[:5]
        elif 'I/O' in line or 'io' in line:
            _, _, _, io_used_mem, io_free_mem = line.split()[:5]
    # NOTE(review): if either the Processor or I/O line is absent from the
    # output, the names below are unbound and this raises NameError — confirm
    # 'show memory statistics' always emits both rows on supported platforms.
    used_mem = int(proc_used_mem) + int(io_used_mem)
    free_mem = int(proc_free_mem) + int(io_free_mem)
    environment.setdefault('memory', {})
    environment['memory']['used_ram'] = used_mem
    environment['memory']['available_ram'] = free_mem
    # --- Temperature ---
    environment.setdefault('temperature', {})
    # The 'show env temperature status' is not ubiquitous in Cisco IOS
    output = self._send_command(temp_cmd)
    if '% Invalid' not in output:
        for line in output.splitlines():
            # Each value is of the form 'Label: <number> <unit>'
            if 'System Temperature Value' in line:
                system_temp = float(line.split(':')[1].split()[0])
            elif 'Yellow Threshold' in line:
                system_temp_alert = float(line.split(':')[1].split()[0])
            elif 'Red Threshold' in line:
                system_temp_crit = float(line.split(':')[1].split()[0])
        env_value = {'is_alert': system_temp >= system_temp_alert,
                     'is_critical': system_temp >= system_temp_crit, 'temperature': system_temp}
        environment['temperature']['system'] = env_value
    else:
        # Command unsupported on this platform: report a sentinel entry
        env_value = {'is_alert': False, 'is_critical': False, 'temperature': -1.0}
        environment['temperature']['invalid'] = env_value
    # Initialize 'power' and 'fan' to default values (not implemented)
    environment.setdefault('power', {})
    environment['power']['invalid'] = {'status': True, 'output': -1.0, 'capacity': -1.0}
    environment.setdefault('fans', {})
    environment['fans']['invalid'] = {'status': True}
    return environment
Get arp table information.
Return a list of dictionaries having the following set of keys:
* interface (string)
* mac (string)
* ip (string)
* age (float)
For example::
[
{
'interface' : 'MgmtEth0/RSP0/CPU0/0',
'mac' : '5c:5e:ab:da:3c:f0',
'ip' : '172.17.17.1',
'age' : 1454496274.84
},
{
'interface': 'MgmtEth0/RSP0/CPU0/0',
'mac' : '66:0e:94:96:e0:ff',
'ip' : '172.17.17.2',
'age' : 1435641582.49
}
]
def get_arp_table(self):
    """
    Get arp table information.
    Return a list of dictionaries having the following set of keys:
        * interface (string)
        * mac (string)
        * ip (string)
        * age (float)
    For example::
        [
            {
                'interface' : 'MgmtEth0/RSP0/CPU0/0',
                'mac'       : '5c:5e:ab:da:3c:f0',
                'ip'        : '172.17.17.1',
                'age'       : 1454496274.84
            },
            {
                'interface': 'MgmtEth0/RSP0/CPU0/0',
                'mac'       : '66:0e:94:96:e0:ff',
                'ip'        : '172.17.17.2',
                'age'       : 1435641582.49
            }
        ]
    :raises ValueError: on unparseable lines, invalid IP/MAC, or a bad age field.
    """
    arp_table = []
    command = 'show arp | exclude Incomplete'
    output = self._send_command(command)
    # Skip the first line which is a header
    output = output.split('\n')
    output = output[1:]
    for line in output:
        if len(line) == 0:
            # Bugfix: previously returned {} here, which violates the
            # documented list return type. An empty line marks the end of
            # the table, so return whatever has been collected so far.
            return arp_table
        if len(line.split()) == 5:
            # Static ARP entries have no interface
            # Internet  10.0.0.1                -   0010.2345.1cda  ARPA
            interface = ''
            protocol, address, age, mac, eth_type = line.split()
        elif len(line.split()) == 6:
            protocol, address, age, mac, eth_type, interface = line.split()
        else:
            raise ValueError("Unexpected output from: {}".format(line.split()))
        try:
            if age == '-':
                # '-' means a static entry with no ageing timer
                age = 0
            age = float(age)
        except ValueError:
            raise ValueError("Unable to convert age value to float: {}".format(age))
        # Validate we matched correctly
        if not re.search(RE_IPADDR, address):
            raise ValueError("Invalid IP Address detected: {}".format(address))
        if not re.search(RE_MAC, mac):
            raise ValueError("Invalid MAC Address detected: {}".format(mac))
        entry = {
            'interface': interface,
            'mac': napalm_base.helpers.mac(mac),
            'ip': address,
            'age': age
        }
        arp_table.append(entry)
    return arp_table
Execute a list of commands and return the output in a dictionary format using the command
as the key.
Example input:
['show clock', 'show calendar']
Output example:
{ 'show calendar': u'22:02:01 UTC Thu Feb 18 2016',
'show clock': u'*22:01:51.165 UTC Thu Feb 18 2016'}
def cli(self, commands):
    """
    Execute a list of commands and return the output in a dictionary format using the command
    as the key.
    Example input:
    ['show clock', 'show calendar']
    Output example:
    {   'show calendar': u'22:02:01 UTC Thu Feb 18 2016',
        'show clock': u'*22:01:51.165 UTC Thu Feb 18 2016'}
    :param commands: list of CLI command strings to run on the device.
    :raises TypeError: if commands is not a list.
    :raises ValueError: if the device rejects a command.
    """
    cli_output = dict()
    # Idiom fix: isinstance() instead of a `type(...) is` identity check,
    # so list subclasses are accepted too.
    if not isinstance(commands, list):
        raise TypeError('Please enter a valid list of commands!')
    for command in commands:
        output = self._send_command(command)
        if 'Invalid input detected' in output:
            raise ValueError('Unable to execute command "{}"'.format(command))
        # Direct assignment; the previous setdefault({}) was immediately
        # overwritten and served no purpose.
        cli_output[command] = output
    return cli_output
Returns a lists of dictionaries. Each dictionary represents an entry in the MAC Address
Table, having the following keys
* mac (string)
* interface (string)
* vlan (int)
* active (boolean)
* static (boolean)
* moves (int)
* last_move (float)
Format1:
Destination Address Address Type VLAN Destination Port
------------------- ------------ ---- --------------------
6400.f1cf.2cc6 Dynamic 1 Wlan-GigabitEthernet0
Cat 6500:
Legend: * - primary entry
age - seconds since last seen
n/a - not available
vlan mac address type learn age ports
------+----------------+--------+-----+----------+--------------------------
* 999 1111.2222.3333 dynamic Yes 0 Port-channel1
999 1111.2222.3333 dynamic Yes 0 Port-channel1
Cat 4948
Unicast Entries
vlan mac address type protocols port
-------+---------------+--------+---------------------+--------------------
999 1111.2222.3333 dynamic ip Port-channel1
Cat 2960
Mac Address Table
-------------------------------------------
Vlan Mac Address Type Ports
---- ----------- -------- -----
All 1111.2222.3333 STATIC CPU
def get_mac_address_table(self):
    """
    Returns a lists of dictionaries. Each dictionary represents an entry in the MAC Address
    Table, having the following keys
        * mac (string)
        * interface (string)
        * vlan (int)
        * active (boolean)
        * static (boolean)
        * moves (int)
        * last_move (float)
    Format1:
    Destination Address  Address Type  VLAN  Destination Port
    -------------------  ------------  ----  --------------------
    6400.f1cf.2cc6          Dynamic       1     Wlan-GigabitEthernet0
    Cat 6500:
    Legend: * - primary entry
            age - seconds since last seen
            n/a - not available
      vlan   mac address     type    learn     age              ports
    ------+----------------+--------+-----+----------+--------------------------
    *  999  1111.2222.3333   dynamic  Yes          0   Port-channel1
       999  1111.2222.3333   dynamic  Yes          0   Port-channel1
    Cat 4948
    Unicast Entries
     vlan   mac address     type        protocols               port
    -------+---------------+--------+---------------------+--------------------
     999    1111.2222.3333   dynamic ip                    Port-channel1
    Cat 2960
    Mac Address Table
    -------------------------------------------
    Vlan    Mac Address       Type        Ports
    ----    -----------       --------    -----
     All    1111.2222.3333    STATIC      CPU
    """
    # Per-platform line shapes; field counts refer to whitespace-split tokens.
    RE_MACTABLE_DEFAULT = r"^" + MAC_REGEX
    RE_MACTABLE_6500_1 = r"^\*\s+{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX)   # 7 fields
    RE_MACTABLE_6500_2 = r"^{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX)        # 6 fields
    RE_MACTABLE_6500_3 = r"^\s{51}\S+"                                       # Fill down from prior
    RE_MACTABLE_4500_1 = r"^{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX)        # 5 fields
    RE_MACTABLE_4500_2 = r"^\s{32}\S+"                                       # Fill down from prior
    RE_MACTABLE_2960_1 = r"^All\s+{}".format(MAC_REGEX)
    RE_MACTABLE_GEN_1 = r"^{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX)         # 4 fields (2960/4500)

    def process_mac_fields(vlan, mac, mac_type, interface):
        """Return proper data for mac address fields."""
        if mac_type.lower() in ['self', 'static', 'system']:
            static = True
            if vlan.lower() == 'all':
                # 'All' VLANs are normalized to 0
                vlan = 0
            if interface.lower() == 'cpu' or re.search(r'router', interface.lower()) or \
                    re.search(r'switch', interface.lower()):
                # Internal destinations have no physical interface
                interface = ''
        else:
            static = False
        if mac_type.lower() in ['dynamic']:
            active = True
        else:
            active = False
        return {
            'mac': napalm_base.helpers.mac(mac),
            'interface': interface,
            'vlan': int(vlan),
            'static': static,
            'active': active,
            # moves/last_move are not available from IOS output
            'moves': -1,
            'last_move': -1.0
        }

    mac_address_table = []
    command = IOS_COMMANDS['show_mac_address']
    output = self._send_command(command)
    # Skip the header lines
    output = re.split(r'^----.*', output, flags=re.M)[1:]
    output = "\n".join(output).strip()
    # Strip any leading asterisks
    output = re.sub(r"^\*", "", output, flags=re.M)
    fill_down_vlan = fill_down_mac = fill_down_mac_type = ''
    for line in output.splitlines():
        # Cat6500 one off and 4500 multicast format: continuation lines that
        # contain only port names, inheriting vlan/mac/type from the line above
        if (re.search(RE_MACTABLE_6500_3, line) or re.search(RE_MACTABLE_4500_2, line)):
            interface = line.strip()
            if ',' in interface:
                interfaces = interface.split(',')
            else:
                interfaces = []
                interfaces.append(interface)
            for single_interface in interfaces:
                mac_address_table.append(process_mac_fields(fill_down_vlan, fill_down_mac,
                                                            fill_down_mac_type,
                                                            single_interface))
            continue
        line = line.strip()
        if line == '':
            continue
        if re.search(r"^---", line):
            # Convert any '---' to VLAN 0
            line = re.sub(r"^---", "0", line, flags=re.M)
        # Format1
        if re.search(RE_MACTABLE_DEFAULT, line):
            if len(line.split()) == 4:
                mac, mac_type, vlan, interface = line.split()
                mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface))
            else:
                raise ValueError("Unexpected output from: {}".format(line.split()))
        # Cat6500 format
        elif (re.search(RE_MACTABLE_6500_1, line) or re.search(RE_MACTABLE_6500_2, line)) and \
                len(line.split()) >= 6:
            if len(line.split()) == 7:
                _, vlan, mac, mac_type, _, _, interface = line.split()
            elif len(line.split()) == 6:
                vlan, mac, mac_type, _, _, interface = line.split()
            if ',' in interface:
                # Multiple ports: remember fields for fill-down continuation lines
                interfaces = interface.split(',')
                fill_down_vlan = vlan
                fill_down_mac = mac
                fill_down_mac_type = mac_type
                for single_interface in interfaces:
                    mac_address_table.append(process_mac_fields(vlan, mac, mac_type,
                                                                single_interface))
            else:
                mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface))
        # Cat4500 format
        elif re.search(RE_MACTABLE_4500_1, line) and len(line.split()) == 5:
            vlan, mac, mac_type, _, interface = line.split()
            mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface))
        # Cat2960 format - ignore extra header line
        elif re.search(r"^Vlan\s+Mac Address\s+", line):
            continue
        # Cat2960 format (Cat4500 format multicast entries)
        elif (re.search(RE_MACTABLE_2960_1, line) or re.search(RE_MACTABLE_GEN_1, line)) and \
                len(line.split()) == 4:
            vlan, mac, mac_type, interface = line.split()
            if ',' in interface:
                interfaces = interface.split(',')
                fill_down_vlan = vlan
                fill_down_mac = mac
                fill_down_mac_type = mac_type
                for single_interface in interfaces:
                    mac_address_table.append(process_mac_fields(vlan, mac, mac_type,
                                                                single_interface))
            else:
                mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface))
        # Trailing summary / section headers are ignored
        elif re.search(r"Total Mac Addresses", line):
            continue
        elif re.search(r"Multicast Entries", line):
            continue
        elif re.search(r"vlan.*mac.*address.*type.*", line):
            continue
        else:
            raise ValueError("Unexpected output from: {}".format(repr(line)))
    return mac_address_table
Executes traceroute on the device and returns a dictionary with the result.
:param destination: Host or IP Address of the destination
:param source (optional): Use a specific IP Address to execute the traceroute
:param ttl (optional): Maximum number of hops -> int (0-255)
:param timeout (optional): Number of seconds to wait for response -> int (1-3600)
Output dictionary has one of the following keys:
* success
* error
In case of success, the keys of the dictionary represent the hop ID, while values are
dictionaries containing the probes results:
* rtt (float)
* ip_address (str)
* host_name (str)
def traceroute(self, destination, source=C.TRACEROUTE_SOURCE,
               ttl=C.TRACEROUTE_TTL, timeout=C.TRACEROUTE_TIMEOUT, vrf=C.TRACEROUTE_VRF):
    """
    Executes traceroute on the device and returns a dictionary with the result.
    :param destination: Host or IP Address of the destination
    :param source (optional): Use a specific IP Address to execute the traceroute
    :param ttl (optional): Maximum number of hops -> int (0-255)
    :param timeout (optional): Number of seconds to wait for response -> int (1-3600)
    Output dictionary has one of the following keys:
        * success
        * error
    In case of success, the keys of the dictionary represent the hop ID, while values are
    dictionaries containing the probes results:
        * rtt (float)
        * ip_address (str)
        * host_name (str)
    """
    # vrf needs to be right after the traceroute command
    if vrf:
        command = "traceroute vrf {} {}".format(vrf, destination)
    else:
        command = "traceroute {}".format(destination)
    if source:
        command += " source {}".format(source)
    if ttl:
        # TTL should be an integer between 0 and 255.
        # Bugfix: the original range-checked `timeout` here instead of `ttl`.
        if isinstance(ttl, int) and 0 <= ttl <= 255:
            command += " ttl 0 {}".format(str(ttl))
    if timeout:
        # Timeout should be an integer between 1 and 3600
        if isinstance(timeout, int) and 1 <= timeout <= 3600:
            command += " timeout {}".format(str(timeout))
    # Calculation to leave enough time for traceroute to complete assumes send_command
    # delay of .2 seconds.
    max_loops = (5 * ttl * timeout) + 150
    if max_loops < 500:  # Make sure max_loops isn't set artificially low
        max_loops = 500
    output = self.device.send_command(command, max_loops=max_loops)
    # Prepare return dict
    traceroute_dict = dict()
    if re.search('Unrecognized host or address', output):
        traceroute_dict['error'] = 'unknown host %s' % destination
        return traceroute_dict
    else:
        traceroute_dict['success'] = dict()
    results = dict()
    # Find all hops (raw string for the regex; behavior unchanged)
    hops = re.findall(r'\n\s+[0-9]{1,3}\s', output)
    for hop in hops:
        # Search for hop in the output to find where its section starts
        hop_match = re.search(hop, output)
        start_index = hop_match.start()
        if hops.index(hop) + 1 == len(hops):
            # Last hop: its section runs to the end of the output
            stop_index = len(output)
        else:
            # Otherwise it runs up to where the next hop starts
            next_hop_match = re.search(hops[hops.index(hop) + 1], output)
            stop_index = next_hop_match.start()
        # Remove spaces before 'msec' so each probe is a single token
        hop_string = output[start_index:stop_index].replace(' msec', 'msec')
        hop_list = hop_string.split()
        current_hop = int(hop_list.pop(0))
        # Prepare dictionary for each hop (assuming there are 3 probes in each hop)
        results[current_hop] = dict()
        results[current_hop]['probes'] = dict()
        for probe_id in (1, 2, 3):
            results[current_hop]['probes'][probe_id] = {'rtt': float(),
                                                        'ip_address': '',
                                                        'host_name': ''}
        current_probe = 1
        ip_address = ''
        host_name = ''
        while hop_list:
            current_element = hop_list.pop(0)
            # If current_element is * move index in dictionary to next probe
            if current_element == '*':
                current_probe += 1
            # If current_element contains msec record the entry for probe
            elif 'msec' in current_element:
                ip_address = py23_compat.text_type(ip_address)
                host_name = py23_compat.text_type(host_name)
                rtt = float(current_element.replace('msec', ''))
                results[current_hop]['probes'][current_probe]['ip_address'] = ip_address
                results[current_hop]['probes'][current_probe]['host_name'] = host_name
                results[current_hop]['probes'][current_probe]['rtt'] = rtt
                # After recording the entry move the index to next probe
                current_probe += 1
            # If element contains '(' and ')', the output format is 'FQDN (IP_ADDRESS)'
            # Save the IP address
            elif '(' in current_element:
                ip_address = current_element.replace('(', '').replace(')', '')
            # Otherwise the element is both the host name and the IP address
            else:
                host_name = current_element
                ip_address = current_element
    traceroute_dict['success'] = results
    return traceroute_dict
Implementation of get_config for IOS.
Returns the startup or/and running configuration as dictionary.
The keys of the dictionary represent the type of configuration
(startup or running). The candidate is always empty string,
since IOS does not support candidate configuration.
def get_config(self, retrieve='all'):
    """Implementation of get_config for IOS.
    Returns the startup or/and running configuration as dictionary.
    The keys of the dictionary represent the type of configuration
    (startup or running). The candidate is always empty string,
    since IOS does not support candidate configuration.
    """
    configs = {'startup': '', 'running': '', 'candidate': ''}
    # Fetch only the configuration sources the caller asked for.
    if retrieve in ('startup', 'all'):
        configs['startup'] = self._send_command('show startup-config')
    if retrieve in ('running', 'all'):
        configs['running'] = self._send_command('show running-config')
    return configs
Set the range of the accelerometer to the provided value. Range value
should be one of these constants:
- ADXL345_RANGE_2_G = +/-2G
- ADXL345_RANGE_4_G = +/-4G
- ADXL345_RANGE_8_G = +/-8G
- ADXL345_RANGE_16_G = +/-16G
def set_range(self, value):
    """Set the range of the accelerometer to the provided value. Range value
    should be one of these constants:
      - ADXL345_RANGE_2_G   = +/-2G
      - ADXL345_RANGE_4_G   = +/-4G
      - ADXL345_RANGE_8_G   = +/-8G
      - ADXL345_RANGE_16_G  = +/-16G
    """
    # Preserve the upper bits of the data format register, then merge in
    # the requested range and force FULL-RES on so scaling stays consistent.
    current = self._device.readU8(ADXL345_REG_DATA_FORMAT)
    updated = (current & ~0x0F) | value | 0x08  # 0x08 = FULL-RES bit
    self._device.write8(ADXL345_REG_DATA_FORMAT, updated)
Read the current value of the accelerometer and return it as a tuple
of signed 16-bit X, Y, Z axis values.
def read(self):
    """Read the current value of the accelerometer and return it as a tuple
    of signed 16-bit X, Y, Z axis values.
    """
    # Six bytes starting at DATAX0: little-endian int16 per axis.
    data = self._device.readList(ADXL345_REG_DATAX0, 6)
    x, y, z = struct.unpack('<hhh', data)
    return (x, y, z)
Stops interrupts on all boards. Only required when using
:func:`digital_read` and :func:`digital_write`.
:param bus: SPI bus /dev/spidev<bus>.<chipselect> (default: {bus})
:type bus: int
:param chip_select: SPI chip select /dev/spidev<bus>.<chipselect>
(default: {chip})
:type chip_select: int
def deinit(bus=DEFAULT_SPI_BUS,
           chip_select=DEFAULT_SPI_CHIP_SELECT):
    """Stops interrupts on all boards. Only required when using
    :func:`digital_read` and :func:`digital_write`.

    :param bus: SPI bus /dev/spidev<bus>.<chipselect> (default: {bus})
    :type bus: int
    :param chip_select: SPI chip select /dev/spidev<bus>.<chipselect>
        (default: {chip})
    :type chip_select: int
    """
    global _pifacedigitals
    for board in _pifacedigitals:
        try:
            board.deinit_board()
        except AttributeError:
            # Board object without a deinit_board method — nothing to do.
            pass
Writes the value to the input pin specified.
.. note:: This function is for familiarity with users of other types of
IO board. Consider accessing the ``output_pins`` attribute of a
PiFaceDigital object:
>>> pfd = PiFaceDigital(hardware_addr)
>>> pfd.output_pins[pin_num].value = 1
:param pin_num: The pin number to write to.
:type pin_num: int
:param value: The value to write.
:type value: int
:param hardware_addr: The board to read from (default: 0)
:type hardware_addr: int
def digital_write(pin_num, value, hardware_addr=0):
    """Writes the value to the input pin specified.

    .. note:: This function is for familiarity with users of other types of
       IO board. Consider accessing the ``output_pins`` attribute of a
       PiFaceDigital object:

       >>> pfd = PiFaceDigital(hardware_addr)
       >>> pfd.output_pins[pin_num].value = 1

    :param pin_num: The pin number to write to.
    :type pin_num: int
    :param value: The value to write.
    :type value: int
    :param hardware_addr: The board to read from (default: 0)
    :type hardware_addr: int
    """
    board = _get_pifacedigital(hardware_addr)
    board.output_pins[pin_num].value = value
Writes the value to the input pullup specified.
.. note:: This function is for familiarity with users of other types of
IO board. Consider accessing the ``gppub`` attribute of a
PiFaceDigital object:
>>> pfd = PiFaceDigital(hardware_addr)
>>> hex(pfd.gppub.value)
0xff
>>> pfd.gppub.bits[pin_num].value = 1
:param pin_num: The pin number to write to.
:type pin_num: int
:param value: The value to write.
:type value: int
:param hardware_addr: The board to read from (default: 0)
:type hardware_addr: int
def digital_write_pullup(pin_num, value, hardware_addr=0):
    """Writes the value to the input pullup specified.

    .. note:: This function is for familiarity with users of other types of
       IO board. Consider accessing the ``gppub`` attribute of a
       PiFaceDigital object:

       >>> pfd = PiFaceDigital(hardware_addr)
       >>> hex(pfd.gppub.value)
       0xff
       >>> pfd.gppub.bits[pin_num].value = 1

    :param pin_num: The pin number to write to.
    :type pin_num: int
    :param value: The value to write.
    :type value: int
    :param hardware_addr: The board to read from (default: 0)
    :type hardware_addr: int
    """
    board = _get_pifacedigital(hardware_addr)
    board.gppub.bits[pin_num].value = value
Returns this computers IP address as a string.
def get_my_ip():
    """Returns this computers IP address as a string."""
    # Run the configured shell command, drop the trailing newline, and
    # strip any remaining surrounding whitespace.
    raw = subprocess.check_output(GET_IP_CMD, shell=True)
    return raw.decode('utf-8')[:-1].strip()
Sets the output port value to new_value, defaults to old_value.
def set_output_port(self, new_value, old_value=0):
    """Sets the output port value to new_value, defaults to old_value."""
    print("Setting output port to {}.".format(new_value))
    result = old_value
    try:
        result = int(new_value)       # try decimal first
    except ValueError:
        result = int(new_value, 16)   # fall back to hexadecimal
    finally:
        # Whatever was parsed (or old_value on total failure) is written
        # to the hardware, even if the hex parse raised.
        self.pifacedigital.output_port.value = result
    return result
Wrap the calls the url, with the given arguments.
:param str url: Url to call with the given arguments
:param str method: [POST | GET] Method to use on the request
:param int status: Expected status code
def _request_api(self, **kwargs):
    """Wrap the calls the url, with the given arguments.

    :param str url: Url to call with the given arguments
    :param str method: [POST | GET] Method to use on the request
    :param int status: Expected status code
    """
    _url = kwargs.get('url')
    _method = kwargs.get('method', 'GET')
    _status = kwargs.get('status', 200)
    if _method not in ['GET', 'POST']:
        raise ValueError('Method is not GET or POST')
    attempts = 0
    while True:
        try:
            res = REQ[_method](_url, cookies=self._cookie)
            # Unexpected status raises; raised inside the try on purpose so
            # the retry handling below sees the same exceptions as before.
            if res.status_code != _status:
                raise BadStatusException(res.content)
            break
        except requests.exceptions.BaseHTTPError:
            # Transient HTTP failure: retry up to self._retries times.
            if attempts < self._retries:
                attempts += 1
                continue
            raise MaxRetryError
    self._last_result = res
    return res
Get info about a user based on his id.
:return: JSON
def get_infos_with_id(self, uid):
    """Get info about a user based on his id.

    :return: JSON
    """
    url = USER_INFO_URL.format(logid=uid)
    return self._request_api(url=url).json()
Get the current activities of user.
Either use the `login` param, or the client's login if unset.
:return: JSON
def get_current_activities(self, login=None, **kwargs):
    """Get the current activities of user.
    Either use the `login` param, or the client's login if unset.

    :return: JSON
    """
    # kwargs takes precedence, then the positional param, then our own login.
    user = kwargs.get('login', login or self._login)
    return self._request_api(url=ACTIVITY_URL.format(login=user)).json()
Get the current notifications of a user.
:return: JSON
def get_notifications(self, login=None, **kwargs):
    """Get the current notifications of a user.

    :return: JSON
    """
    user = kwargs.get('login', login or self._login)
    return self._request_api(url=NOTIF_URL.format(login=user)).json()
Get a user's grades on a single promotion based on his login.
Either use the `login` param, or the client's login if unset.
:return: JSON
def get_grades(self, login=None, promotion=None, **kwargs):
    """Get a user's grades on a single promotion based on his login.
    Either use the `login` param, or the client's login if unset.

    :return: JSON
    """
    user = kwargs.get('login', login or self._login)
    promo = kwargs.get('promotion', promotion)
    url = GRADES_URL.format(login=user, promo_id=promo)
    return self._request_api(url=url).json()
Get a user's picture.
:param str login: Login of the user to check
:return: JSON
def get_picture(self, login=None, **kwargs):
    """Get a user's picture.

    :param str login: Login of the user to check
    :return: picture bytes
    """
    user = kwargs.get('login', login or self._login)
    # Raw bytes, not JSON — pictures are binary content.
    return self._request_api(url=PICTURE_URL.format(login=user)).content
Get a user's project.
:param str login: User's login (Default: self._login)
:return: JSON
def get_projects(self, **kwargs):
    """Get a user's project.

    :param str login: User's login (Default: self._login)
    :return: JSON
    """
    user = kwargs.get('login', self._login)
    return self._request_api(url=SEARCH_URL.format(login=user)).json()
Get the related activities of a project.
:param str module: Stages of a given module
:return: JSON
def get_activities_for_project(self, module=None, **kwargs):
    """Get the related activities of a project.

    :param str module: Stages of a given module
    :return: JSON
    """
    module_id = kwargs.get('module', module)
    return self._request_api(url=ACTIVITIES_URL.format(module_id=module_id)).json()
Get groups for activity.
:param str module: Base module
:param str module: Project which contains the group requested
:return: JSON
def get_group_for_activity(self, module=None, project=None, **kwargs):
    """Get groups for activity.

    :param str module: Base module
    :param str project: Project which contains the group requested
    :return: JSON
    """
    module_id = kwargs.get('module', module)
    project_id = kwargs.get('project', project)
    url = GROUPS_URL.format(module_id=module_id, project_id=project_id)
    return self._request_api(url=url).json()
Get users by promotion id.
:param int promotion: Promotion ID
:return: JSON
def get_students(self, **kwargs):
    """Get users by promotion id.

    :param int promotion: Promotion ID
    :return: JSON
    """
    promo_id = kwargs.get('promotion')
    return self._request_api(url=PROMOTION_URL.format(promo_id=promo_id)).json()
Get a user's log events.
:param str login: User's login (Default: self._login)
:return: JSON
def get_log_events(self, login=None, **kwargs):
    """Get a user's log events.

    :param str login: User's login (Default: self._login)
    :return: JSON
    """
    # NOTE: falls back to the positional `login` only (no self._login default
    # here), matching the original behavior.
    user = kwargs.get('login', login)
    return self._request_api(url=GSA_EVENTS_URL.format(login=user)).json()
Get a user's events.
:param str login: User's login (Default: self._login)
:param str start_date: Start date
:param str end_date: To date
:return: JSON
def get_events(self, login=None, start_date=None, end_date=None, **kwargs):
    """Get a user's events.

    :param str login: User's login (Default: self._login)
    :param str start_date: Start date
    :param str end_date: To date
    :return: JSON
    """
    user = kwargs.get('login', login)
    url = EVENTS_URL.format(
        login=user,
        start_date=start_date,
        end_date=end_date,
    )
    return self._request_api(url=url).json()
Get a user's logs.
:param str login: User's login (Default: self._login)
:return: JSON
def get_logs(self, login=None, **kwargs):
    """Get a user's logs.

    :param str login: User's login (Default: self._login)
    :return: JSON
    """
    user = kwargs.get('login', login)
    return self._request_api(url=GSA_LOGS_URL.format(login=user)).json()
Process headers dict to return the format class
(not the instance)
def negotiate(cls, headers):
    """Process headers dict to return the format class (not the instance)."""
    # Normalize header names to lowercase before lookup.
    lowered = {key.lower(): value for key, value in headers.items()}
    accept = lowered.get('accept', "*/*")
    accept_parts = [part.strip() for part in accept.split(";")]

    def accepts(required):
        # True when every required media-type token is in the Accept header.
        return all(token in accept_parts for token in required)

    # Protobuffer (only one version)
    if accepts(cls.PROTOBUF['default']):
        return ProtobufFormat
    if accepts(cls.PROTOBUF['text']):
        return ProtobufTextFormat
    # Text 0.0.4
    if accepts(cls.TEXT['0.0.4']):
        return TextFormat
    # Text (Default)
    if accepts(cls.TEXT['default']):
        return TextFormat
    # Default
    return cls.FALLBACK
Registers a collector
def register(self, collector):
    """Register a collector instance under its name.

    Raises TypeError for non-Collector objects and ValueError when the
    name is already taken.
    """
    if not isinstance(collector, Collector):
        raise TypeError(
            "Can't register instance, not a valid type of collector")
    if collector.name in self.collectors:
        raise ValueError("Collector already exists or name colision")
    # Registry mutations are serialized through the shared mutex.
    with mutex:
        self.collectors[collector.name] = collector
Add works like replace, but only previously pushed metrics with the
same name (and the same job and instance) will be replaced.
(It uses HTTP method 'POST' to push to the Pushgateway.)
def add(self, registry):
    """ Add works like replace, but only previously pushed metrics with the
    same name (and the same job and instance) will be replaced.
    (It uses HTTP method 'POST' to push to the Pushgateway.)
    """
    # POST the marshalled registry; the response was previously bound to an
    # unused local, which has been removed.
    payload = self.formatter.marshall(registry)
    requests.post(self.path, data=payload, headers=self.headers)
Push triggers a metric collection and pushes all collected metrics
to the Pushgateway specified by addr
Note that all previously pushed metrics with the same job and
instance will be replaced with the metrics pushed by this call.
(It uses HTTP method 'PUT' to push to the Pushgateway.)
def replace(self, registry):
    """ Push triggers a metric collection and pushes all collected metrics
    to the Pushgateway specified by addr
    Note that all previously pushed metrics with the same job and
    instance will be replaced with the metrics pushed by this call.
    (It uses HTTP method 'PUT' to push to the Pushgateway.)
    """
    # PUT the marshalled registry; the response was previously bound to an
    # unused local, which has been removed.
    payload = self.formatter.marshall(registry)
    requests.put(self.path, data=payload, headers=self.headers)
Marshalls a collector and returns the storage/transfer format in
a tuple, this tuple has representation format per element.
def marshall_lines(self, collector):
    """ Marshalls a collector and returns the storage/transfer format in
    a tuple, this tuple has representation format per element.

    :raises TypeError: if collector is not a Counter, Gauge or Summary.
    """
    # Pick the per-type formatting method.
    if isinstance(collector, collectors.Counter):
        exec_method = self._format_counter
    elif isinstance(collector, collectors.Gauge):
        exec_method = self._format_gauge
    elif isinstance(collector, collectors.Summary):
        exec_method = self._format_summary
    else:
        raise TypeError("Not a valid object format")
    # create headers
    help_header = TextFormat.HELP_FMT.format(name=collector.name,
                                             help_text=collector.help_text)
    type_header = TextFormat.TYPE_FMT.format(name=collector.name,
                                             value_type=collector.REPR_STR)
    # Prepare start headers
    lines = [help_header, type_header]
    for sample in collector.get_all():
        r = exec_method(sample, collector.name, collector.const_labels)
        # A formatter may return a single line (str) or several (iterable).
        # Bugfix: collections.Iterable was removed in Python 3.10; use the
        # collections.abc alias instead.
        if not isinstance(r, str) and isinstance(r, collections.abc.Iterable):
            lines.extend(r)
        else:
            lines.append(r)
    return lines
Marshalls a full registry (various collectors)
def marshall(self, registry):
    """Marshalls a full registry (various collectors)"""
    chunks = [self.marshall_collector(collector)
              for collector in registry.get_all()]
    # Sort? used in tests
    chunks.sort()
    # Needs EOF
    chunks.append("")
    return self.__class__.LINE_SEPARATOR_FMT.join(chunks)
Returns bytes
def marshall(self, registry):
    """Returns bytes"""
    chunks = []
    for collector in registry.get_all():
        # Each message needs to be prefixed with a varint with the size of
        # the message (MetrycType)
        # https://github.com/matttproud/golang_protobuf_extensions/blob/master/ext/encode.go
        # http://zombietetris.de/blog/building-your-own-writedelimitedto-for-python-protobuf/
        body = self.marshall_collector(collector).SerializeToString()
        chunks.append(encoder._VarintBytes(len(body)) + body)
    # Joining once avoids repeated bytes concatenation.
    return b"".join(chunks)
Gathers the metrics
def gather_data(registry):
    """Gather trigonometry example metrics forever, one sample per second."""
    # Label every sample with this machine's host name.
    host = socket.gethostname()
    trig_metric = Gauge("trigonometry_example",
                        "Various trigonometry examples.",
                        {'host': host})
    registry.register(trig_metric)
    # Sweep an angle one degree per second and publish sine/cosine.
    step = 0
    while True:
        time.sleep(1)
        angle = math.radians(step % 360)
        trig_metric.set({'type': "sine"}, math.sin(angle))
        trig_metric.set({'type': "cosine"}, math.cos(angle))
        step += 1
Sets a value in the container
def set_value(self, labels, value):
    """Store ``value`` in the container under ``labels``.

    Raises ValueError (via _label_names_correct) when a label name is
    reserved or restricted.
    """
    # An empty/None labels mapping skips validation.
    if labels:
        self._label_names_correct(labels)
    # Guard the shared mapping against concurrent writers.
    with mutex:
        self.values[labels] = value
Raise exception (ValueError) if labels not correct
def _label_names_correct(self, labels):
    """Raise ValueError if any label name is not allowed.

    A label name is rejected when it is one of the reserved names or
    when it starts with a restricted prefix.

    Returns:
        True when every label name is acceptable.
    """
    # Only the keys are validated; iterating .items() for unused
    # values was wasted work.
    for name in labels:
        # Check reserved labels
        if name in RESTRICTED_LABELS_NAMES:
            raise ValueError("Labels not correct")
        # Check prefixes
        if any(name.startswith(prefix)
               for prefix in RESTRICTED_LABELS_PREFIXES):
            raise ValueError("Labels not correct")
    return True
Returns a list populated by tuples of 2 elements: the first one is
a dict with all the labels and the second element is the value
of the metric itself
def get_all(self):
    """Return a list of 2-tuples: a dict with all the labels (or None
    for the unlabeled entry) and the value of the metric itself.
    """
    with mutex:
        stored = self.values.items()
        result = []
        for encoded_key, _ in stored:
            # The single-value entry is stored under a custom empty key.
            if not encoded_key or encoded_key == MetricDict.EMPTY_KEY:
                labels = None
            else:
                labels = decoder.decode(encoded_key)
            result.append((labels, self.get(encoded_key)))
        return result
Add adds the given value to the Gauge. (The value can be
negative, resulting in a decrease of the Gauge.)
def add(self, labels, value):
    """Add the given value to the Gauge.

    The value can be negative, resulting in a decrease of the Gauge.
    """
    # EAFP: an unseen label set means the gauge starts from zero.
    try:
        base = self.get_value(labels)
    except KeyError:
        base = 0
    self.set_value(labels, base + value)
Add adds a single observation to the summary.
def add(self, labels, value):
    """Add a single observation to the summary.

    Raises TypeError for non-numeric values (note: the exact-type
    check also rejects bool and int/float subclasses).
    """
    if type(value) not in (float, int):
        raise TypeError("Summary only works with digits (int, float)")
    # The data lock also protects the per-label quantile estimator,
    # which has no lock of its own.
    with mutex:
        try:
            estimator = self.get_value(labels)
        except KeyError:
            # First observation for this label set: create an estimator
            # with the class default invariants.
            estimator = quantile.Estimator(*self.__class__.DEFAULT_INVARIANTS)
            self.set_value(labels, estimator)
        estimator.observe(float(value))
Get gets the data in the form of 0.5, 0.9 and 0.99 percentiles. Also
you get sum and count, all in a dict
def get(self, labels):
    """Return the summary data for ``labels`` as a dict.

    The dict maps each configured quantile (defaults 0.50, 0.90, 0.99)
    to its current estimate, plus the sum and count keys.
    """
    data = {}
    # The data lock also guards the estimator internals, which have no
    # lock of their own.
    with mutex:
        estimator = self.get_value(labels)
        # One entry per configured invariant quantile.
        for invariant in estimator._invariants:
            data[invariant._quantile] = estimator.query(invariant._quantile)
        # Aggregate sum and observation count.
        data[self.__class__.SUM_KEY] = estimator._sum
        data[self.__class__.COUNT_KEY] = estimator._observations
    return data
Gathers the metrics
def gather_data(registry):
    """Gather memory and CPU metrics forever, roughly once per second."""
    # Label every sample with this machine's host name.
    host = socket.gethostname()
    # One gauge per resource kind.
    ram_metric = Gauge("memory_usage_bytes", "Memory usage in bytes.",
                       {'host': host})
    cpu_metric = Gauge("cpu_usage_percent", "CPU usage percent.",
                       {'host': host})
    registry.register(ram_metric)
    registry.register(cpu_metric)
    while True:
        time.sleep(1)
        # Memory samples: virtual (total + cached) and swap.
        virtual = psutil.virtual_memory()
        swap = psutil.swap_memory()
        ram_metric.set({'type': "virtual", }, virtual.used)
        ram_metric.set({'type': "virtual", 'status': "cached"}, virtual.cached)
        ram_metric.set({'type': "swap"}, swap.used)
        # Per-core CPU usage; psutil blocks for the 1s sampling interval.
        for core, percent in enumerate(psutil.cpu_percent(interval=1,
                                                          percpu=True)):
            cpu_metric.set({'core': core}, percent)
Gathers the metrics
def gather_data(registry):
    """Gather file-write IO timing metrics forever, every 0.7 seconds."""
    # Label every sample with this machine's host name.
    host = socket.gethostname()
    io_metric = Summary("write_file_io_example",
                        "Writing io file in disk example.",
                        {'host': host})
    registry.register(io_metric)
    chunk = b'\xff'*4000  # 4000 bytes
    filename_path = "/tmp/prometheus_test"
    blocksizes = (100, 10000, 1000000, 100000000)
    while True:
        time.sleep(0.7)
        for blocksize in blocksizes:
            started = time.time()
            # Write blocksize // 10000 chunks of 4000 bytes; sizes
            # below 10000 write nothing (kept from the original).
            with open(filename_path, "wb") as f:
                for _ in range(blocksize // 10000):
                    f.write(chunk)
            io_metric.add({"file": filename_path, "block": blocksize},
                          time.time() - started)
Returns the first child that matches the given name and
attributes.
def get_child(self, name, attribs=None):
    """
    Return the first child matching ``name`` and ``attribs``.

    The special name '.' refers to this node itself when no attributes
    are given or when they match this node's attributes; otherwise the
    lookup falls through to the child index.
    """
    if name == '.':
        if attribs is None or len(attribs) == 0:
            return self
        if attribs == self.attribs:
            return self
    return self.child_index.get(nodehash(name, attribs))
Creates the given node, regardless of whether or not it already
exists.
Returns the new node.
def create(self, path, data=None):
    """
    Create the node at ``path`` unconditionally and return it.

    The leaf node is always newly created; intermediate nodes are
    reused when they already exist. ``data``, if truthy, is unquoted
    and stored as the leaf's text.
    """
    node = self.current[-1]
    steps = self._splitpath(path)
    last = len(steps) - 1
    for depth, step in enumerate(steps):
        tag, attribs = self._splittag(step)
        if depth == last:
            # The leaf is always a fresh node, even if one exists.
            node = node.add(Node(tag, attribs))
            break
        # Reuse an existing intermediate node when possible.
        child = node.get_child(tag, attribs)
        node = child if child is not None else node.add(Node(tag, attribs))
    if data:
        node.text = unquote(data)
    return node
Creates the given node if it does not exist.
Returns the (new or existing) node.
def add(self, path, data=None, replace=False):
    """
    Create the node at ``path`` if it does not exist.

    Existing nodes along the whole path (including the leaf) are
    reused. ``data`` is unquoted and appended to the node's text, or
    replaces it when ``replace`` is True. Returns the (new or
    existing) node.
    """
    node = self.current[-1]
    for step in self._splitpath(path):
        tag, attribs = self._splittag(step)
        child = node.get_child(tag, attribs)
        node = child if child is not None else node.add(Node(tag, attribs))
    if replace:
        node.text = ''
    if data:
        text = unquote(data)
        node.text = text if node.text is None else node.text + text
    return node
Creates the given attribute and sets it to the given value.
Returns the (new or existing) node to which the attribute was added.
def add_attribute(self, path, name, value):
    """
    Set attribute ``name`` to ``value`` on the node at ``path``.

    The node is created if it does not exist; the (new or existing)
    node the attribute was added to is returned.
    """
    target = self.add(path)
    target.attribs.append((name, value))
    return target
Creates and enters the given node, regardless of whether it already
exists.
Returns the new node.
def open(self, path):
    """
    Create and enter the node at ``path``, regardless of whether it
    already exists.

    The new node becomes the current node and is returned.
    """
    created = self.create(path)
    self.current.append(created)
    return created
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.