repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver.discard_config | python | def discard_config(self):
self.device.cu.rollback(rb_id=0)
if not self.config_lock:
self._unlock() | Discard changes (rollback 0). | train | https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L244-L248 | null | class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
"""
Initialise JunOS driver.
Optional args:
* config_lock (True/False): lock configuration DB after the connection is established.
* port (int): custom port
* key_file (string): SSH key file path
* keepalive (int): Keepalive interval
* ignore_warning (boolean): not generate warning exceptions
"""
self.hostname = hostname
self.username = username
self.password = password
self.timeout = timeout
self.config_replace = False
self.locked = False
# Get optional arguments
if optional_args is None:
optional_args = {}
self.config_lock = optional_args.get('config_lock', False)
self.port = optional_args.get('port', 22)
self.key_file = optional_args.get('key_file', None)
self.keepalive = optional_args.get('keepalive', 30)
self.ssh_config_file = optional_args.get('ssh_config_file', None)
self.ignore_warning = optional_args.get('ignore_warning', False)
if self.key_file:
self.device = Device(hostname,
user=username,
password=password,
ssh_private_key_file=self.key_file,
ssh_config=self.ssh_config_file,
port=self.port)
else:
self.device = Device(hostname,
user=username,
password=password,
port=self.port,
ssh_config=self.ssh_config_file)
self.profile = ["junos"]
    def open(self):
        """Open the connection with the device."""
        try:
            self.device.open()
        except ConnectTimeoutError as cte:
            # Re-raise as the NAPALM-level exception callers expect.
            raise ConnectionException(cte.message)
        self.device.timeout = self.timeout
        # Keepalive must be set on the underlying SSH transport, and only
        # after the session has been established.
        self.device._conn._session.transport.set_keepalive(self.keepalive)
        if hasattr(self.device, "cu"):
            # make sure to remove the cu attr from previous session
            # ValueError: requested attribute name cu already exists
            del self.device.cu
        self.device.bind(cu=Config)
        if self.config_lock:
            # Hold the configuration DB lock for the whole session lifetime.
            self._lock()
def close(self):
"""Close the connection."""
if self.config_lock:
self._unlock()
self.device.close()
def _lock(self):
"""Lock the config DB."""
if not self.locked:
self.device.cu.lock()
self.locked = True
def _unlock(self):
"""Unlock the config DB."""
if self.locked:
self.device.cu.unlock()
self.locked = False
    def _rpc(self, get, child=None, **kwargs):
        """
        This allows you to construct an arbitrary RPC call to retrieve common stuff. For example:
        Configuration: get: "<get-configuration/>"
        Interface information: get: "<get-interface-information/>"
        A particular interface's information:
        get: "<get-interface-information/>"
        child: "<interface-name>ge-0/0/0</interface-name>"
        """
        # NOTE(review): **kwargs is accepted but never used below.
        rpc = etree.fromstring(get)
        if child:
            # The child element (e.g. an interface-name filter) is nested
            # inside the outer RPC element.
            rpc.append(etree.fromstring(child))
        response = self.device.execute(rpc)
        # Return the reply serialised back to an XML string, not an Element.
        return etree.tostring(response)
def is_alive(self):
# evaluate the state of the underlying SSH connection
# and also the NETCONF status from PyEZ
return {
'is_alive': self.device._conn._session.transport.is_active() and self.device.connected
}
@staticmethod
def _is_json_format(config):
try:
_ = json.loads(config) # noqa
except (TypeError, ValueError):
return False
return True
def _detect_config_format(self, config):
fmt = 'text'
set_action_matches = [
'set',
'activate',
'deactivate',
'annotate',
'copy',
'delete',
'insert',
'protect',
'rename',
'unprotect',
]
if config.strip().startswith('<'):
return 'xml'
elif config.strip().split(' ')[0] in set_action_matches:
return 'set'
elif self._is_json_format(config):
return 'json'
return fmt
    def _load_candidate(self, filename, config, overwrite):
        # Load the configuration from *filename* (preferred) or from the
        # *config* string into the candidate DB; *overwrite* selects
        # replace semantics, otherwise the load is a merge.
        if filename is None:
            configuration = config
        else:
            with open(filename) as f:
                configuration = f.read()
        if not self.config_lock:
            # if not locked during connection time
            # will try to lock it if not already acquired
            self._lock()
            # and the device will be locked till first commit/rollback
        try:
            fmt = self._detect_config_format(configuration)
            if fmt == "xml":
                # PyEZ expects an lxml Element for XML payloads.
                configuration = etree.XML(configuration)
            self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                                ignore_warning=self.ignore_warning)
        except ConfigLoadError as e:
            # Surface the failure as the exception type matching the
            # operation the caller requested (replace vs. merge).
            if self.config_replace:
                raise ReplaceConfigException(e.errs)
            else:
                raise MergeConfigException(e.errs)
    def load_replace_candidate(self, filename=None, config=None):
        """Open the candidate config and replace (overwrite) the running config with it."""
        self.config_replace = True
        self._load_candidate(filename, config, True)
    def load_merge_candidate(self, filename=None, config=None):
        """Open the candidate config and merge it into the running config."""
        self.config_replace = False
        self._load_candidate(filename, config, False)
def compare_config(self):
"""Compare candidate config with running."""
diff = self.device.cu.diff()
if diff is None:
return ''
else:
return diff.strip()
def commit_config(self):
"""Commit configuration."""
self.device.cu.commit(ignore_warning=self.ignore_warning)
if not self.config_lock:
self._unlock()
    def rollback(self):
        """Rollback to previous commit."""
        # rb_id=1 loads the previously committed configuration into the
        # candidate DB; committing it then makes it active again.
        self.device.cu.rollback(rb_id=1)
        self.commit_config()
def get_facts(self):
"""Return facts of the device."""
output = self.device.facts
uptime = self.device.uptime or -1
interfaces = junos_views.junos_iface_table(self.device)
interfaces.get()
interface_list = interfaces.keys()
return {
'vendor': u'Juniper',
'model': py23_compat.text_type(output['model']),
'serial_number': py23_compat.text_type(output['serialnumber']),
'os_version': py23_compat.text_type(output['version']),
'hostname': py23_compat.text_type(output['hostname']),
'fqdn': py23_compat.text_type(output['fqdn']),
'uptime': uptime,
'interface_list': interface_list
}
    def get_interfaces(self):
        """Return interfaces details, keyed by interface name."""
        result = {}
        interfaces = junos_views.junos_iface_table(self.device)
        interfaces.get()

        # convert all the tuples to our pre-defined dict structure
        for iface in interfaces.keys():
            result[iface] = {
                'is_up': interfaces[iface]['is_up'],
                'is_enabled': interfaces[iface]['is_enabled'],
                'description': (interfaces[iface]['description'] or u''),
                # -1 means "never flapped / unknown".
                'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
                'mac_address': napalm_base.helpers.convert(
                    napalm_base.helpers.mac,
                    interfaces[iface]['mac_address'],
                    py23_compat.text_type(interfaces[iface]['mac_address'])),
                'speed': -1
            }
            # result[iface]['last_flapped'] = float(result[iface]['last_flapped'])

            # Parse speed strings like "1000mbps" / "10Gbps"; the result is
            # normalised to Mbps, defaulting to -1 when unparseable.
            match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
            if match is None:
                continue
            speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
            if speed_value == -1:
                continue
            speed_unit = match.group(2)
            if speed_unit.lower() == 'gbps':
                speed_value *= 1000
            result[iface]['speed'] = speed_value

        return result
def get_interfaces_counters(self):
"""Return interfaces counters."""
query = junos_views.junos_iface_counter_table(self.device)
query.get()
interface_counters = {}
for interface, counters in query.items():
interface_counters[interface] = {k: v if v is not None else -1 for k, v in counters}
return interface_counters
    def get_environment(self):
        """Return environment details: power, fans, temperature, cpu, memory."""
        environment = junos_views.junos_enviroment_table(self.device)
        routing_engine = junos_views.junos_routing_engine_table(self.device)
        temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
        power_supplies = junos_views.junos_pem_table(self.device)
        environment.get()
        routing_engine.get()
        temperature_thresholds.get()
        environment_data = {}
        current_class = None

        for sensor_object, object_data in environment.items():
            structured_object_data = {k: v for k, v in object_data}

            if structured_object_data['class']:
                # If current object has a 'class' defined, store it for use
                # on subsequent unlabeled lines.
                current_class = structured_object_data['class']
            else:
                # Juniper doesn't label the 2nd+ lines of a given class with a
                # class name. In that case, we use the most recent class seen.
                structured_object_data['class'] = current_class

            if structured_object_data['class'] == 'Power':
                # Create a dict for the 'power' key
                try:
                    environment_data['power'][sensor_object] = {}
                except KeyError:
                    environment_data['power'] = {}
                    environment_data['power'][sensor_object] = {}
                # -1.0 marks capacity/output as unknown; possibly corrected
                # from the PEM table further below.
                environment_data['power'][sensor_object]['capacity'] = -1.0
                environment_data['power'][sensor_object]['output'] = -1.0

            if structured_object_data['class'] == 'Fans':
                # Create a dict for the 'fans' key
                try:
                    environment_data['fans'][sensor_object] = {}
                except KeyError:
                    environment_data['fans'] = {}
                    environment_data['fans'][sensor_object] = {}

            status = structured_object_data['status']
            env_class = structured_object_data['class']
            if (status == 'OK' and env_class == 'Power'):
                # If status is Failed, Absent or Testing, set status to False.
                environment_data['power'][sensor_object]['status'] = True
            elif (status != 'OK' and env_class == 'Power'):
                environment_data['power'][sensor_object]['status'] = False
            elif (status == 'OK' and env_class == 'Fans'):
                # If status is Failed, Absent or Testing, set status to False.
                environment_data['fans'][sensor_object]['status'] = True
            elif (status != 'OK' and env_class == 'Fans'):
                environment_data['fans'][sensor_object]['status'] = False

            # NOTE(review): this inner loop runs once per sensor and keys the
            # result on sensor_object, not temperature_object — confirm this
            # pairing is intentional.
            for temperature_object, temperature_data in temperature_thresholds.items():
                structured_temperature_data = {k: v for k, v in temperature_data}
                if structured_object_data['class'] == 'Temp':
                    # Create a dict for the 'temperature' key
                    try:
                        environment_data['temperature'][sensor_object] = {}
                    except KeyError:
                        environment_data['temperature'] = {}
                        environment_data['temperature'][sensor_object] = {}
                    # Check we have a temperature field in this class (See #66)
                    if structured_object_data['temperature']:
                        environment_data['temperature'][sensor_object]['temperature'] = \
                            float(structured_object_data['temperature'])
                    # Set a default value (False) to the key is_critical and is_alert
                    environment_data['temperature'][sensor_object]['is_alert'] = False
                    environment_data['temperature'][sensor_object]['is_critical'] = False
                    # Check if the working temperature is equal to or higher than alerting threshold
                    temp = structured_object_data['temperature']
                    if structured_temperature_data['red-alarm'] <= temp:
                        environment_data['temperature'][sensor_object]['is_critical'] = True
                        environment_data['temperature'][sensor_object]['is_alert'] = True
                    elif structured_temperature_data['yellow-alarm'] <= temp:
                        environment_data['temperature'][sensor_object]['is_alert'] = True

        # Try to correct Power Supply information
        pem_table = dict()
        try:
            power_supplies.get()
        except RpcError:
            # Not all platforms have support for this
            pass
        else:
            # Format PEM information and correct capacity and output values
            for pem in power_supplies.items():
                pem_name = pem[0].replace("PEM", "Power Supply")
                pem_table[pem_name] = dict(pem[1])
                environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
                environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']

        for routing_engine_object, routing_engine_data in routing_engine.items():
            structured_routing_engine_data = {k: v for k, v in routing_engine_data}
            # Create dicts for 'cpu' and 'memory'.
            try:
                environment_data['cpu'][routing_engine_object] = {}
                environment_data['memory'] = {}
            except KeyError:
                environment_data['cpu'] = {}
                environment_data['cpu'][routing_engine_object] = {}
                environment_data['memory'] = {}
            # Calculate the CPU usage by using the CPU idle value.
            environment_data['cpu'][routing_engine_object]['%usage'] = \
                100.0 - structured_routing_engine_data['cpu-idle']
            try:
                environment_data['memory']['available_ram'] = \
                    int(structured_routing_engine_data['memory-dram-size'])
            except ValueError:
                # Value may arrive with a unit suffix; keep the digits only.
                environment_data['memory']['available_ram'] = \
                    int(
                        ''.join(
                            i for i in structured_routing_engine_data['memory-dram-size']
                            if i.isdigit()
                        )
                    )
            # Junos gives us RAM in %, so calculation has to be made.
            # Sadly, bacause of this, results are not 100% accurate to the truth.
            environment_data['memory']['used_ram'] = \
                int(round(environment_data['memory']['available_ram'] / 100.0 *
                          structured_routing_engine_data['memory-buffer-utilization']))

        return environment_data
@staticmethod
def _get_address_family(table):
"""
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
"""
address_family_mapping = {
'inet': 'ipv4',
'inet6': 'ipv6',
'inetflow': 'flow'
}
family = table.split('.')[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = family
return address_family
    def _parse_route_stats(self, neighbor):
        # Build per-address-family prefix counters for one BGP neighbor.
        # Counters default to -1 (unknown); they stay that way when the
        # session is down. NOTE: mutates neighbor['sent_prefixes'] in place.
        data = {
            'ipv4': {
                'received_prefixes': -1,
                'accepted_prefixes': -1,
                'sent_prefixes': -1
            },
            'ipv6': {
                'received_prefixes': -1,
                'accepted_prefixes': -1,
                'sent_prefixes': -1
            }
        }
        if not neighbor['is_up']:
            return data
        elif isinstance(neighbor['tables'], list):
            if isinstance(neighbor['sent_prefixes'], int):
                # We expect sent_prefixes to be a list, but sometimes it
                # is of type int. Therefore convert attribute to list
                neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
            for idx, table in enumerate(neighbor['tables']):
                family = self._get_address_family(table)
                data[family] = {}
                data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
                data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
                # Sent counters are only reported for tables that are in
                # sync, so consume them in order from the front of the list.
                if 'in sync' in neighbor['send-state'][idx]:
                    data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
                else:
                    data[family]['sent_prefixes'] = 0
        else:
            # Single table: counters are scalars, not lists.
            family = self._get_address_family(neighbor['tables'])
            data[family] = {}
            data[family]['received_prefixes'] = neighbor['received_prefixes']
            data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
            data[family]['sent_prefixes'] = neighbor['sent_prefixes']
        return data
@staticmethod
def _parse_value(value):
if isinstance(value, py23_compat.string_types):
return py23_compat.text_type(value)
elif value is None:
return u''
else:
return value
    def get_bgp_neighbors(self):
        """Return BGP neighbors details, grouped by routing instance."""
        bgp_neighbor_data = {}
        # Template for each peer; only keys present here make it into the
        # final per-peer dict.
        default_neighbor_details = {
            'local_as': 0,
            'remote_as': 0,
            'remote_id': '',
            'is_up': False,
            'is_enabled': False,
            'description': '',
            'uptime': 0,
            'address_family': {}
        }
        keys = default_neighbor_details.keys()

        uptime_table = junos_views.junos_bgp_uptime_table(self.device)
        bgp_neighbors_table = junos_views.junos_bgp_table(self.device)

        # Memoised per-instance uptime results, to avoid repeating the RPC.
        uptime_table_lookup = {}

        def _get_uptime_table(instance):
            # Fetch (and cache) the uptime table for one routing instance.
            if instance not in uptime_table_lookup:
                uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
            return uptime_table_lookup[instance]

        def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
            '''
            Make sure to execute a simple request whenever using
            junos > 13. This is a helper used to avoid code redundancy
            and reuse the function also when iterating through the list
            BGP neighbors under a specific routing instance,
            also when the device is capable to return the routing
            instance name at the BGP neighbor level.
            '''
            for bgp_neighbor in neighbor_data:
                # Peer key may carry a "+port" suffix; strip it.
                peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
                neighbor_details = deepcopy(default_neighbor_details)
                neighbor_details.update(
                    {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
                )
                if not instance:
                    # not instance, means newer Junos version,
                    # as we request everything in a single request
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                    instance = peer_fwd_rti
                else:
                    # instance is explicitly requests,
                    # thus it's an old Junos, so we retrieve the BGP neighbors
                    # under a certain routing instance
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
                instance_name = 'global' if instance == 'master' else instance
                if instance_name not in bgp_neighbor_data:
                    bgp_neighbor_data[instance_name] = {}
                if 'router_id' not in bgp_neighbor_data[instance_name]:
                    # we only need to set this once
                    bgp_neighbor_data[instance_name]['router_id'] = \
                        py23_compat.text_type(neighbor_details.get('local_id', ''))
                peer = {
                    key: self._parse_value(value)
                    for key, value in neighbor_details.items()
                    if key in keys
                }
                peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
                peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
                peer['address_family'] = self._parse_route_stats(neighbor_details)
                if 'peers' not in bgp_neighbor_data[instance_name]:
                    bgp_neighbor_data[instance_name]['peers'] = {}
                bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
                if not uptime_table_items:
                    uptime_table_items = _get_uptime_table(instance)
                # Merge uptime values into the peers collected above.
                for neighbor, uptime in uptime_table_items:
                    if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                        bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                    bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]

        # Commenting out the following sections, till Junos
        # will provide a way to identify the routing instance name
        # from the details of the BGP neighbor
        # currently, there are Junos 15 version having a field called `peer_fwd_rti`
        # but unfortunately, this is not consistent.
        # Junos 17 might have this fixed, but this needs to be revisited later.
        # In the definition below, `old_junos` means a version that does not provide
        # the forwarding RTI information.
        #
        # old_junos = napalm_base.helpers.convert(
        #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15

        # if old_junos:
        instances = junos_views.junos_route_instance_table(self.device).get()
        for instance, instance_data in instances.items():
            if instance.startswith('__'):
                # junos internal instances
                continue
            bgp_neighbor_data[instance] = {'peers': {}}
            instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
            uptime_table_items = uptime_table.get(instance=instance).items()
            _get_bgp_neighbors_core(instance_neighbors,
                                    instance=instance,
                                    uptime_table_items=uptime_table_items)
        # If the OS provides the `peer_fwd_rti` or any way to identify the
        # rotuing instance name (see above), the performances of this getter
        # can be significantly improved, as we won't execute one request
        # for each an every RT.
        # However, this improvement would only be beneficial for multi-VRF envs.
        #
        # else:
        #     instance_neighbors = bgp_neighbors_table.get().items()
        #     _get_bgp_neighbors_core(instance_neighbors)

        # Drop instances that ended up with no peers at all.
        bgp_tmp_dict = {}
        for k, v in bgp_neighbor_data.items():
            if bgp_neighbor_data[k]['peers']:
                bgp_tmp_dict[k] = v
        return bgp_tmp_dict
def get_lldp_neighbors(self):
"""Return LLDP neighbors details."""
lldp = junos_views.junos_lldp_table(self.device)
try:
lldp.get()
except RpcError as rpcerr:
# this assumes the library runs in an environment
# able to handle logs
# otherwise, the user just won't see this happening
log.error('Unable to retrieve the LLDP neighbors information:')
log.error(rpcerr.message)
return {}
result = lldp.items()
neighbors = {}
for neigh in result:
if neigh[0] not in neighbors.keys():
neighbors[neigh[0]] = []
neighbors[neigh[0]].append({x[0]: py23_compat.text_type(x[1]) for x in neigh[1]})
return neighbors
    def get_lldp_neighbors_detail(self, interface=''):
        """Detailed view of the LLDP neighbors."""
        # NOTE(review): the `interface` parameter is shadowed by the loop
        # variable below, so it never filters the results — verify against
        # the expected NAPALM getter semantics.
        lldp_neighbors = {}
        lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
        try:
            lldp_table.get()
        except RpcError as rpcerr:
            # this assumes the library runs in an environment
            # able to handle logs
            # otherwise, the user just won't see this happening
            log.error('Unable to retrieve the LLDP neighbors information:')
            log.error(rpcerr.message)
            return {}
        # NOTE(review): this issues the RPC a second time; lldp_table.keys()
        # alone would reuse the data already fetched above.
        interfaces = lldp_table.get().keys()
        # get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
        # and SRX Series is get-lldp-interface-neighbors-information,
        # and rpc for M, MX, and T Series is get-lldp-interface-neighbors
        # ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
        # ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
        lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
        if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
            lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
        for interface in interfaces:
            # The RPC argument name differs by platform personality.
            if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
                lldp_table.get(interface_name=interface)
            else:
                lldp_table.get(interface_device=interface)
            for item in lldp_table:
                if interface not in lldp_neighbors.keys():
                    lldp_neighbors[interface] = []
                lldp_neighbors[interface].append({
                    'parent_interface': item.parent_interface,
                    'remote_port': item.remote_port,
                    'remote_chassis_id': napalm_base.helpers.convert(
                        napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
                    'remote_port_description': napalm_base.helpers.convert(
                        py23_compat.text_type, item.remote_port_description),
                    'remote_system_name': item.remote_system_name,
                    'remote_system_description': item.remote_system_description,
                    'remote_system_capab': item.remote_system_capab,
                    'remote_system_enable_capab': item.remote_system_enable_capab
                })
        return lldp_neighbors
    def cli(self, commands):
        """Execute raw CLI commands and returns their output."""
        cli_output = {}

        def _count(txt, none):  # Second arg for consistency only. noqa
            '''
            Return the exact output, as Junos displays
            e.g.:
            > show system processes extensive | match root | count
            Count: 113 lines
            '''
            count = len(txt.splitlines())
            return 'Count: {count} lines'.format(count=count)

        def _trim(txt, length):
            '''
            Trim specified number of columns from start of line.
            '''
            try:
                newlines = []
                for line in txt.splitlines():
                    newlines.append(line[int(length):])
                return '\n'.join(newlines)
            except ValueError:
                return txt

        def _except(txt, pattern):
            '''
            Show only text that does not match a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            unmatched = [
                line for line in txt.splitlines()
                if not re.search(rgx, line, re.I)
            ]
            return '\n'.join(unmatched)

        def _last(txt, length):
            '''
            Display end of output only.
            '''
            try:
                return '\n'.join(
                    txt.splitlines()[(-1)*int(length):]
                )
            except ValueError:
                return txt

        def _match(txt, pattern):
            '''
            Show only text that matches a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            matched = [
                line for line in txt.splitlines()
                if re.search(rgx, line, re.I)
            ]
            return '\n'.join(matched)

        def _find(txt, pattern):
            '''
            Search for first occurrence of pattern.
            '''
            rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
            match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
            if match:
                return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
            else:
                return '\nPattern not found'

        def _process_pipe(cmd, txt):
            '''
            Process CLI output from Juniper device that
            doesn't allow piping the output.
            '''
            # NOTE(review): this returns txt untouched whenever the device
            # already produced output — the emulation below only runs for a
            # None result. Confirm this is the intended short-circuit.
            if txt is not None:
                return txt
            _OF_MAP = OrderedDict()
            _OF_MAP['except'] = _except
            _OF_MAP['match'] = _match
            _OF_MAP['last'] = _last
            _OF_MAP['trim'] = _trim
            _OF_MAP['count'] = _count
            _OF_MAP['find'] = _find
            # the operations order matter in this case!
            exploded_cmd = cmd.split('|')
            pipe_oper_args = {}
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0]  # always there
                pipe_args = ''.join(exploded_pipe[1:2])
                # will not throw error when there's no arg
                pipe_oper_args[pipe_oper] = pipe_args
            for oper in _OF_MAP.keys():
                # to make sure the operation sequence is correct
                if oper not in pipe_oper_args.keys():
                    continue
                txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
            return txt

        if not isinstance(commands, list):
            raise TypeError('Please enter a valid list of commands!')
        _PIPE_BLACKLIST = ['save']
        # Preprocessing to avoid forbidden commands
        for command in commands:
            exploded_cmd = command.split('|')
            command_safe_parts = []
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0]  # always there
                if pipe_oper in _PIPE_BLACKLIST:
                    # Drop blacklisted pipe operations from the command sent
                    # to the device.
                    continue
                pipe_args = ''.join(exploded_pipe[1:2])
                safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                                  args=pipe_args)
                command_safe_parts.append(safe_pipe)
            safe_command = exploded_cmd[0] if not command_safe_parts else\
                '{base} | {pipes}'.format(base=exploded_cmd[0],
                                          pipes=' | '.join(command_safe_parts))
            raw_txt = self.device.cli(safe_command, warning=False)
            cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
                _process_pipe(command, raw_txt))
        return cli_output
    def get_bgp_config(self, group='', neighbor=''):
        """Return BGP configuration, optionally filtered by group and neighbor."""
        def update_dict(d, u):  # for deep dictionary update
            for k, v in u.items():
                if isinstance(d, collections.Mapping):
                    if isinstance(v, collections.Mapping):
                        r = update_dict(d.get(k, {}), v)
                        d[k] = r
                    else:
                        d[k] = u[k]
                else:
                    d = {k: u[k]}
            return d

        def build_prefix_limit(**args):
            """
            Transform the lements of a dictionary into nested dictionaries.

            Example:
                {
                    'inet_unicast_limit': 500,
                    'inet_unicast_teardown_threshold': 95,
                    'inet_unicast_teardown_timeout': 5
                }
            becomes:
                {
                    'inet': {
                        'unicast': {
                            'limit': 500,
                            'teardown': {
                                'threshold': 95,
                                'timeout': 5
                            }
                        }
                    }
                }
            """
            prefix_limit = {}
            for key, value in args.items():
                key_levels = key.split('_')
                length = len(key_levels)-1
                temp_dict = {
                    key_levels[length]: value
                }
                # Wrap the value one level deeper for each remaining key part.
                for index in reversed(range(length)):
                    level = key_levels[index]
                    temp_dict = {level: temp_dict}
                update_dict(prefix_limit, temp_dict)
            return prefix_limit

        # Field name → expected datatype, shared by groups and peers.
        _COMMON_FIELDS_DATATYPE_ = {
            'description': py23_compat.text_type,
            'local_address': py23_compat.text_type,
            'local_as': int,
            'remote_as': int,
            'import_policy': py23_compat.text_type,
            'export_policy': py23_compat.text_type,
            'inet_unicast_limit_prefix_limit': int,
            'inet_unicast_teardown_threshold_prefix_limit': int,
            'inet_unicast_teardown_timeout_prefix_limit': int,
            'inet_unicast_novalidate_prefix_limit': int,
            'inet_flow_limit_prefix_limit': int,
            'inet_flow_teardown_threshold_prefix_limit': int,
            'inet_flow_teardown_timeout_prefix_limit': int,
            'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
            'inet6_unicast_limit_prefix_limit': int,
            'inet6_unicast_teardown_threshold_prefix_limit': int,
            'inet6_unicast_teardown_timeout_prefix_limit': int,
            'inet6_unicast_novalidate_prefix_limit': int,
            'inet6_flow_limit_prefix_limit': int,
            'inet6_flow_teardown_threshold_prefix_limit': int,
            'inet6_flow_teardown_timeout_prefix_limit': int,
            'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
        }

        _PEER_FIELDS_DATATYPE_MAP_ = {
            'authentication_key': py23_compat.text_type,
            'route_reflector_client': bool,
            'nhs': bool
        }
        _PEER_FIELDS_DATATYPE_MAP_.update(
            _COMMON_FIELDS_DATATYPE_
        )

        _GROUP_FIELDS_DATATYPE_MAP_ = {
            'type': py23_compat.text_type,
            'apply_groups': list,
            'remove_private_as': bool,
            'multipath': bool,
            'multihop_ttl': int
        }
        _GROUP_FIELDS_DATATYPE_MAP_.update(
            _COMMON_FIELDS_DATATYPE_
        )

        # Default value emitted for each datatype when the field is unset.
        _DATATYPE_DEFAULT_ = {
            py23_compat.text_type: '',
            int: 0,
            bool: False,
            list: []
        }

        bgp_config = {}

        if group:
            bgp = junos_views.junos_bgp_config_group_table(self.device)
            bgp.get(group=group)
        else:
            bgp = junos_views.junos_bgp_config_table(self.device)
            bgp.get()
            neighbor = ''  # if no group is set, no neighbor should be set either
        bgp_items = bgp.items()

        if neighbor:
            neighbor_ip = napalm_base.helpers.ip(neighbor)

        for bgp_group in bgp_items:
            bgp_group_name = bgp_group[0]
            bgp_group_details = bgp_group[1]
            # Pre-populate every non-prefix-limit field with its default.
            bgp_config[bgp_group_name] = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                if key == 'neighbors':
                    # Saved for the per-peer loop below.
                    bgp_group_peers = value
                    continue
                bgp_config[bgp_group_name].update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            if 'multihop' in bgp_config[bgp_group_name].keys():
                # Delete 'multihop' key from the output
                del bgp_config[bgp_group_name]['multihop']
                if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                    # Set ttl to default value 64
                    bgp_config[bgp_group_name]['multihop_ttl'] = 64

            bgp_config[bgp_group_name]['neighbors'] = {}
            for bgp_group_neighbor in bgp_group_peers.items():
                bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
                # NOTE(review): this compares against the raw `neighbor`
                # argument while the final check below uses `neighbor_ip` —
                # confirm both are in the same representation.
                if neighbor and bgp_peer_address != neighbor:
                    continue  # if filters applied, jump over all other neighbors
                bgp_group_details = bgp_group_neighbor[1]
                bgp_peer_details = {
                    field: _DATATYPE_DEFAULT_.get(datatype)
                    for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                    if '_prefix_limit' not in field
                }
                for elem in bgp_group_details:
                    if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                        continue
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    key = elem[0]
                    value = elem[1]
                    if key in ['export_policy', 'import_policy']:
                        if isinstance(value, list):
                            value = ' '.join(value)
                    if key == 'local_address':
                        value = napalm_base.helpers.convert(
                            napalm_base.helpers.ip, value, value)
                    bgp_peer_details.update({
                        key: napalm_base.helpers.convert(datatype, value, default)
                    })
                    bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                        bgp_peer_details['local_as'])
                    bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                        bgp_peer_details['remote_as'])
                    if key == 'cluster':
                        bgp_peer_details['route_reflector_client'] = True
                        # we do not want cluster in the output
                        del bgp_peer_details['cluster']
                if 'cluster' in bgp_config[bgp_group_name].keys():
                    # A cluster-id on the group makes every member peer an RR client.
                    bgp_peer_details['route_reflector_client'] = True
                prefix_limit_fields = {}
                for elem in bgp_group_details:
                    if '_prefix_limit' in elem[0] and elem[1] is not None:
                        datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                        default = _DATATYPE_DEFAULT_.get(datatype)
                        prefix_limit_fields.update({
                            elem[0].replace('_prefix_limit', ''):
                                napalm_base.helpers.convert(datatype, elem[1], default)
                        })
                bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
                bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
                if neighbor and bgp_peer_address == neighbor_ip:
                    break  # found the desired neighbor
            if 'cluster' in bgp_config[bgp_group_name].keys():
                # we do not want cluster in the output
                del bgp_config[bgp_group_name]['cluster']
        return bgp_config
def get_bgp_neighbors_detail(self, neighbor_address=''):
    """Detailed view of the BGP neighbors operational data.

    :param neighbor_address: optional neighbor IP; when empty, every
        neighbor of every routing instance is returned.
    :return: dict of ``{instance_name: {remote_as: [neighbor_details, ...]}}``
        where ``instance_name`` is ``'global'`` for the master instance.
    """
    bgp_neighbors = {}
    # Copied (deepcopy) under every neighbor so callers always see the
    # full set of keys, even when the device omits a field.
    default_neighbor_details = {
        'up': False,
        'local_as': 0,
        'remote_as': 0,
        'router_id': u'',
        'local_address': u'',
        'routing_table': u'',
        'local_address_configured': False,
        'local_port': 0,
        'remote_address': u'',
        'remote_port': 0,
        'multihop': False,
        'multipath': False,
        'remove_private_as': False,
        'import_policy': u'',
        'export_policy': u'',
        'input_messages': -1,
        'output_messages': -1,
        'input_updates': -1,
        'output_updates': -1,
        'messages_queued_out': -1,
        'connection_state': u'',
        'previous_connection_state': u'',
        'last_event': u'',
        'suppress_4byte_as': False,
        'local_as_prepend': False,
        'holdtime': 0,
        'configured_holdtime': 0,
        'keepalive': 0,
        'configured_keepalive': 0,
        'active_prefix_count': -1,
        'received_prefix_count': -1,
        'accepted_prefix_count': -1,
        'suppressed_prefix_count': -1,
        'advertised_prefix_count': -1,
        'flap_count': 0
    }
    # Maps tokens from the space-separated "options" field of the reply
    # to the boolean keys of the returned structure.
    OPTION_KEY_MAP = {
        'RemovePrivateAS': 'remove_private_as',
        'Multipath': 'multipath',
        'Multihop': 'multihop',
        'AddressFamily': 'local_address_configured'
        # 'AuthKey' : 'authentication_key_set'
        # but other vendors do not specify if auth key is set
        # other options:
        # Preference, HoldTime, Ttl, LogUpDown, Refresh
    }

    def _bgp_iter_core(neighbor_data, instance=None):
        '''
        Iterate over a list of neighbors, accumulating into the outer
        ``bgp_neighbors`` dict.
        For older junos, the routing instance is not specified inside the
        BGP neighbors XML, therefore we need to use a super sub-optimal structure
        as in get_bgp_neighbors: iterate through the list of network instances
        then execute one request for each and every routing instance.
        For newer junos, this is not necessary as the routing instance is available
        and we can get everything solve in a single request.
        '''
        for bgp_neighbor in neighbor_data:
            remote_as = int(bgp_neighbor[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # newer JunOS: the instance comes with the neighbor data
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                # older JunOS: instance supplied by the caller
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            options = neighbor_details.pop('options', '')
            if isinstance(options, str):
                options_list = options.split()
                for option in options_list:
                    key = OPTION_KEY_MAP.get(option)
                    if key is not None:
                        neighbor_details[key] = True
            four_byte_as = neighbor_details.pop('4byte_as', 0)
            local_address = neighbor_details.pop('local_address', '')
            # addresses come back as "ip+port"; split into the two parts
            local_details = local_address.split('+')
            neighbor_details['local_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, local_details[0], local_details[0])
            if len(local_details) == 2:
                neighbor_details['local_port'] = int(local_details[1])
            else:
                neighbor_details['local_port'] = 179
            neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
            peer_address = neighbor_details.pop('peer_address', '')
            remote_details = peer_address.split('+')
            neighbor_details['remote_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, remote_details[0], remote_details[0])
            if len(remote_details) == 2:
                neighbor_details['remote_port'] = int(remote_details[1])
            else:
                neighbor_details['remote_port'] = 179
            neighbor_details['routing_table'] = instance_name
            neighbor_details['local_as'] = napalm_base.helpers.as_number(
                neighbor_details['local_as'])
            neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                neighbor_details['remote_as'])
            neighbors_rib = neighbor_details.pop('rib')
            neighbors_queue = neighbor_details.pop('queue')
            messages_queued_out = 0
            # total messages queued for output, summed over all queues
            for queue_entry in neighbors_queue.items():
                messages_queued_out += queue_entry[1][0][1]
            neighbor_details['messages_queued_out'] = messages_queued_out
            if instance_name not in bgp_neighbors.keys():
                bgp_neighbors[instance_name] = {}
            if remote_as not in bgp_neighbors[instance_name].keys():
                bgp_neighbors[instance_name][remote_as] = []
            neighbor_rib_stats = neighbors_rib.items()
            if not neighbor_rib_stats:
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                continue  # no RIBs available, pass default details
            neighbor_rib_details = {
                'active_prefix_count': 0,
                'received_prefix_count': 0,
                'accepted_prefix_count': 0,
                'suppressed_prefix_count': 0,
                'advertised_prefix_count': 0
            }
            # aggregate the prefix counters over all RIBs of this neighbor
            for rib_entry in neighbor_rib_stats:
                for elem in rib_entry[1]:
                    if elem[1] is None:
                        neighbor_rib_details[elem[0]] += 0
                    else:
                        neighbor_rib_details[elem[0]] += elem[1]
            neighbor_details.update(neighbor_rib_details)
            bgp_neighbors[instance_name][remote_as].append(neighbor_details)
    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    # if old_junos:
    # NOTE(review): one RPC per routing instance is issued unconditionally;
    # presumably kept for pre-15 JunOS compatibility -- see the commented
    # single-request alternative below.
    instances = junos_views.junos_route_instance_table(self.device)
    for instance, instance_data in instances.get().items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                neighbor_address=str(neighbor_address)).items()
        _bgp_iter_core(neighbor_data, instance=instance)
    # else:
    #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
    #     _bgp_iter_core(neighbor_data)
    return bgp_neighbors
def get_arp_table(self):
    """Return the ARP table as a list of entry dicts (interface/mac/ip/age)."""
    # jnpr.junos.op.phyport.ArpTable is not used here because we also
    # need filtering, VLAN-ID grouping and the hostname & TTE fields.
    raw_table = junos_views.junos_arp_table(self.device)
    raw_table.get()
    entries = []
    for _, fields in raw_table.items():
        entry = dict(fields)
        # normalise MAC and IP representations
        entry['mac'] = napalm_base.helpers.mac(entry.get('mac'))
        entry['ip'] = napalm_base.helpers.ip(entry.get('ip'))
        entries.append(entry)
    return entries
def get_ntp_peers(self):
    """Return the NTP peers configured on the device, keyed by peer IP."""
    peers_table = junos_views.junos_ntp_peers_config_table(self.device)
    peers_table.get()
    peers = peers_table.items()
    # NAPALM contract: value dicts are empty placeholders
    return {napalm_base.helpers.ip(peer[0]): {} for peer in peers} if peers else {}
def get_ntp_servers(self):
    """Return the NTP servers configured on the device, keyed by server IP."""
    servers_table = junos_views.junos_ntp_servers_config_table(self.device)
    servers_table.get()
    servers = servers_table.items()
    # NAPALM contract: value dicts are empty placeholders
    return {napalm_base.helpers.ip(srv[0]): {} for srv in servers} if servers else {}
def get_ntp_stats(self):
    """Return NTP association statistics.

    JunOS exposes no structured RPC for ``show ntp associations``, so the
    CLI output is fetched as raw text and parsed line by line.

    :return: list of dicts, one per association, with remote address,
        synchronization flag, stratum, timers and delay/offset/jitter.
    """
    ntp_stats = []
    # Raw strings so the backslash escapes reach the regex engine intact
    # (the previous non-raw literals triggered invalid-escape warnings);
    # compiled once instead of re-parsed for every output line.
    assoc_re = re.compile(
        r'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
        r'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
        r'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
        r'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
        r'\s+([0-9\.]+)\s?$',
        re.I
    )
    ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
    ntp_assoc_output_lines = ntp_assoc_output.splitlines()
    # skip the three header lines printed before the associations table
    for ntp_assoc_output_line in ntp_assoc_output_lines[3:]:
        line_search = assoc_re.search(ntp_assoc_output_line)
        if not line_search:
            continue  # pattern not found
        line_groups = line_search.groups()
        try:
            ntp_stats.append({
                'remote': napalm_base.helpers.ip(line_groups[1]),
                'synchronized': (line_groups[0] == '*'),
                'referenceid': py23_compat.text_type(line_groups[2]),
                'stratum': int(line_groups[3]),
                'type': py23_compat.text_type(line_groups[4]),
                'when': py23_compat.text_type(line_groups[5]),
                'hostpoll': int(line_groups[6]),
                'reachability': int(line_groups[7]),
                'delay': float(line_groups[8]),
                'offset': float(line_groups[9]),
                'jitter': float(line_groups[10])
            })
        except Exception:
            continue  # jump to next line
    return ntp_stats
def get_interfaces_ip(self):
    """Return the configured IP addresses.

    :return: nested dict
        ``{interface: {family: {address: {'prefix_length': int}}}}``
        with ``family`` being ``'ipv4'`` or ``'ipv6'``.
    """
    interfaces_ip = {}
    interface_table = junos_views.junos_ip_interfaces_table(self.device)
    interface_table.get()
    interface_table_items = interface_table.items()
    # JunOS address family name -> NAPALM family key
    _FAMILY_VMAP_ = {
        'inet': u'ipv4',
        'inet6': u'ipv6'
        # can add more mappings
    }
    # fallback prefix length when the table key carries no mask
    _FAMILY_MAX_PREFIXLEN = {
        'inet': 32,
        'inet6': 128
    }
    for interface_details in interface_table_items:
        ip_network = interface_details[0]  # table key is "address/prefix"
        ip_address = ip_network.split('/')[0]
        address = napalm_base.helpers.convert(
            napalm_base.helpers.ip, ip_address, ip_address)
        try:
            interface_details_dict = dict(interface_details[1])
            family_raw = interface_details_dict.get('family')
            interface = py23_compat.text_type(interface_details_dict.get('interface'))
        except ValueError:
            continue
        prefix = napalm_base.helpers.convert(int,
                                             ip_network.split('/')[-1],
                                             _FAMILY_MAX_PREFIXLEN.get(family_raw))
        family = _FAMILY_VMAP_.get(family_raw)
        if not family or not interface:
            continue  # unsupported family or nameless interface
        if interface not in interfaces_ip.keys():
            interfaces_ip[interface] = {}
        if family not in interfaces_ip[interface].keys():
            interfaces_ip[interface][family] = {}
        if address not in interfaces_ip[interface][family].keys():
            interfaces_ip[interface][family][address] = {}
        interfaces_ip[interface][family][address][u'prefix_length'] = prefix
    return interfaces_ip
def get_mac_address_table(self):
    """Return the Ethernet switching (MAC) table as a list of entry dicts."""
    # EX/QFX switches expose a different RPC; L2NG-style platforms yet
    # another one -- pick the matching table view.
    if self.device.facts.get('personality', '') in ['SWITCH']:
        if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']:
            mac_table = junos_views.junos_mac_address_table_switch_l2ng(self.device)
        else:
            mac_table = junos_views.junos_mac_address_table_switch(self.device)
    else:
        mac_table = junos_views.junos_mac_address_table(self.device)
    mac_table.get()
    defaults = {
        'mac': u'',
        'interface': u'',
        'vlan': 0,
        'static': False,
        'active': True,
        'moves': 0,
        'last_move': 0.0
    }
    mac_address_table = []
    for _, fields in mac_table.items():
        entry = dict(defaults)
        entry.update(dict(fields))
        mac = entry.get('mac')
        # JUNOS returns '*' for Type = Flood entries -- not a real MAC
        if mac == '*':
            continue
        entry['mac'] = napalm_base.helpers.mac(mac)
        mac_address_table.append(entry)
    return mac_address_table
def get_route_to(self, destination='', protocol=''):
    """Return route details to a specific destination, learned from a certain protocol.

    :param destination: destination prefix (must be a string).
    :param protocol: optional protocol filter; 'connected' is translated
        to JunOS's 'direct'.
    :raises TypeError: when destination is not a string.
    :raises CommandTimeoutException: when the query matches too many routes.
    :raises CommandErrorException: for unknown protocols or other RPC errors.
    :return: dict of {prefix: [route_details, ...]}.
    """
    routes = {}
    if not isinstance(destination, py23_compat.string_types):
        raise TypeError('Please specify a valid destination!')
    if protocol and isinstance(destination, py23_compat.string_types):
        protocol = protocol.lower()
    if protocol == 'connected':
        protocol = 'direct'  # this is how is called on JunOS
    _COMMON_PROTOCOL_FIELDS_ = [
        'destination',
        'prefix_length',
        'protocol',
        'current_active',
        'last_active',
        'age',
        'next_hop',
        'outgoing_interface',
        'selected_next_hop',
        'preference',
        'inactive_reason',
        'routing_table'
    ]  # identifies the list of fileds common for all protocols
    _BOOLEAN_FIELDS_ = [
        'current_active',
        'selected_next_hop',
        'last_active'
    ]  # fields expected to have boolean values
    # anything else found on a route is kept only when listed here
    _PROTOCOL_SPECIFIC_FIELDS_ = {
        'bgp': [
            'local_as',
            'remote_as',
            'as_path',
            'communities',
            'local_preference',
            'preference2',
            'remote_address',
            'metric',
            'metric2'
        ],
        'isis': [
            'level',
            'metric',
            'local_as'
        ]
    }
    routes_table = junos_views.junos_protocol_route_table(self.device)
    rt_kargs = {
        'destination': destination
    }
    if protocol and isinstance(destination, py23_compat.string_types):
        rt_kargs['protocol'] = protocol
    try:
        routes_table.get(**rt_kargs)
    except RpcTimeoutError:
        # on devices with milions of routes
        # in case the destination is too generic (e.g.: 10/8)
        # will take very very long to determine all routes and
        # moreover will return a huge list
        raise CommandTimeoutException(
            'Too many routes returned! Please try with a longer prefix or a specific protocol!'
        )
    except RpcError as rpce:
        if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
            raise CommandErrorException(
                'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
        raise CommandErrorException(rpce)
    except Exception as err:
        raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))
    routes_items = routes_table.items()
    for route in routes_items:
        d = {}
        # next_hop = route[0]
        d = {elem[0]: elem[1] for elem in route[1]}
        destination = napalm_base.helpers.ip(d.pop('destination', ''))
        prefix_length = d.pop('prefix_length', 32)
        destination = '{d}/{p}'.format(
            d=destination,
            p=prefix_length
        )
        # normalise missing boolean fields to False
        d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
        as_path = d.get('as_path')
        if as_path is not None:
            d['as_path'] = as_path.split(' I ')[0]\
                .replace('AS path:', '')\
                .replace('I', '')\
                .strip()
            # to be sure that contains only AS Numbers
        if d.get('inactive_reason') is None:
            d['inactive_reason'] = u''
        route_protocol = d.get('protocol').lower()
        if protocol and protocol != route_protocol:
            continue
        communities = d.get('communities')
        if communities is not None and type(communities) is not list:
            d['communities'] = [communities]
        d_keys = list(d.keys())
        # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
        all_protocol_attributes = {
            key: d.pop(key)
            for key in d_keys
            if key not in _COMMON_PROTOCOL_FIELDS_
        }
        protocol_attributes = {
            key: value for key, value in all_protocol_attributes.items()
            if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
        }
        d['protocol_attributes'] = protocol_attributes
        if destination not in routes.keys():
            routes[destination] = []
        routes[destination].append(d)
    return routes
def get_snmp_information(self):
    """Return the SNMP configuration.

    :return: dict with chassis/contact/location fields plus a
        'community' dict mapping community name to its ACL and
        authorization mode.
    """
    snmp_information = {}
    snmp_config = junos_views.junos_snmp_config_table(self.device)
    snmp_config.get()
    snmp_items = snmp_config.items()
    if not snmp_items:
        return snmp_information
    # the config table yields a single row; unpack its fields
    snmp_information = {
        py23_compat.text_type(ele[0]): ele[1] if ele[1] else ''
        for ele in snmp_items[0][1]
    }
    snmp_information['community'] = {}
    communities_table = snmp_information.pop('communities_table')
    if not communities_table:
        return snmp_information
    for community in communities_table.items():
        community_name = py23_compat.text_type(community[0])
        community_details = {
            'acl': ''
        }
        # 'mode' is translated from the JunOS authorization keyword to
        # the NAPALM value; every other field is passed through as text
        community_details.update({
            py23_compat.text_type(ele[0]): py23_compat.text_type(
                ele[1] if ele[0] != 'mode'
                else C.SNMP_AUTHORIZATION_MODE_MAP.get(ele[1]))
            for ele in community[1]
        })
        snmp_information['community'][community_name] = community_details
    return snmp_information
def get_probes_config(self):
    """Return the configured RPM probes, grouped by probe then test name."""
    probes = {}
    config_table = junos_views.junos_rpm_probes_config_table(self.device)
    config_table.get()
    for test, fields in config_table.items():
        test_name = py23_compat.text_type(test)
        details = dict(fields)
        probe_name = napalm_base.helpers.convert(
            py23_compat.text_type, details.pop('probe_name'))
        # normalise the remaining fields to their expected types
        probes.setdefault(probe_name, {})[test_name] = {
            'probe_type': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('probe_type', '')),
            'target': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('target', '')),
            'source': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('source_address', '')),
            'probe_count': napalm_base.helpers.convert(
                int, details.pop('probe_count', '0')),
            'test_interval': napalm_base.helpers.convert(
                int, details.pop('test_interval', '0'))
        }
    return probes
def get_probes_results(self):
    """Return the results of the RPM probes, keyed by probe then test name."""
    probes_results = {}
    probes_results_table = junos_views.junos_rpm_probes_results_table(self.device)
    probes_results_table.get()
    probes_results_items = probes_results_table.items()
    for probe_result in probes_results_items:
        probe_name = py23_compat.text_type(probe_result[0])
        test_results = {
            p[0]: p[1] for p in probe_result[1]
        }
        test_results['last_test_loss'] = napalm_base.helpers.convert(
            int, test_results.pop('last_test_loss'), 0)
        # rewriting values (not keys) during iteration is safe in Python 3
        for test_param_name, test_param_value in test_results.items():
            if isinstance(test_param_value, float):
                test_results[test_param_name] = test_param_value * 1e-3
                # convert from useconds to mseconds
        test_name = test_results.pop('test_name', '')
        source = test_results.get('source', u'')
        if source is None:
            test_results['source'] = u''
        if probe_name not in probes_results.keys():
            probes_results[probe_name] = {}
        probes_results[probe_name][test_name] = test_results
    return probes_results
def traceroute(self,
               destination,
               source=C.TRACEROUTE_SOURCE,
               ttl=C.TRACEROUTE_TTL,
               timeout=C.TRACEROUTE_TIMEOUT,
               vrf=C.TRACEROUTE_VRF):
    """Execute traceroute and return results.

    :param destination: host/IP to trace towards (mandatory).
    :param source, ttl, timeout, vrf: optional CLI knobs; falsy values
        are simply omitted from the command.
    :return: ``{'error': ...}`` on failure, otherwise
        ``{'success': {ttl: {'probes': {index: {ip/host/rtt}}}}}``
        with rtt converted to milliseconds.
    """
    traceroute_result = {}
    # calling form RPC does not work properly :(
    # but defined junos_route_instance_table just in case
    source_str = ''
    maxttl_str = ''
    wait_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        wait_str = ' wait {timeout}'.format(timeout=timeout)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
        destination=destination,
        source=source_str,
        maxttl=maxttl_str,
        wait=wait_str,
        vrf=vrf_str
    )
    traceroute_rpc = E('command', traceroute_command)
    rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    traceroute_results = rpc_reply.find('.//traceroute-results')
    traceroute_failure = napalm_base.helpers.find_txt(
        traceroute_results, 'traceroute-failure', '')
    error_message = napalm_base.helpers.find_txt(
        traceroute_results, 'rpc-error/error-message', '')
    if traceroute_failure and error_message:
        return {'error': '{}: {}'.format(traceroute_failure, error_message)}
    traceroute_result['success'] = {}
    for hop in traceroute_results.findall('hop'):
        ttl_value = napalm_base.helpers.convert(
            int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
        if ttl_value not in traceroute_result['success']:
            traceroute_result['success'][ttl_value] = {'probes': {}}
        for probe in hop.findall('probe-result'):
            probe_index = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
            host_name = py23_compat.text_type(
                napalm_base.helpers.find_txt(probe, 'host-name', '*'))
            rtt = napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3  # ms
            traceroute_result['success'][ttl_value]['probes'][probe_index] = {
                'ip_address': ip_address,
                'host_name': host_name,
                'rtt': rtt
            }
    return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
         timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
    """Execute ping on the device and return the parsed results.

    :param destination: host/IP to ping (mandatory).
    :param source, ttl, timeout, size, count, vrf: optional CLI knobs;
        falsy values are simply omitted from the command.
    :return: dict with either a 'success' structure (packet loss, rtt
        stats converted to milliseconds, per-probe results) or an
        'error' key.
    """
    ping_dict = {}
    source_str = ''
    maxttl_str = ''
    timeout_str = ''
    size_str = ''
    count_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        timeout_str = ' wait {timeout}'.format(timeout=timeout)
    if size:
        size_str = ' size {size}'.format(size=size)
    if count:
        count_str = ' count {count}'.format(count=count)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
        destination=destination,
        source=source_str,
        ttl=maxttl_str,
        timeout=timeout_str,
        size=size_str,
        count=count_str,
        vrf=vrf_str
    )
    ping_rpc = E('command', ping_command)
    rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    probe_summary = rpc_reply.find('.//probe-results-summary')
    if probe_summary is None:
        rpc_error = rpc_reply.find('.//rpc-error')
        return {'error': '{}'.format(
            napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
    packet_loss = napalm_base.helpers.convert(
        int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
    # rtt values are valid only if we get an ICMP reply.
    # BUGFIX: was `is not 100` -- identity comparison against an int
    # literal only worked via CPython's small-int caching; use `!=`.
    if packet_loss != 100:
        ping_dict['success'] = {}
        ping_dict['success']['probes_sent'] = int(
            probe_summary.findtext("probes-sent"))
        ping_dict['success']['packet_loss'] = packet_loss
        # device reports rtt in microseconds; scale to ms
        ping_dict['success'].update({
            'rtt_min': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
            'rtt_max': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
            'rtt_avg': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-average'), -1) * 1e-3), 3),
            'rtt_stddev': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
        })
        tmp = rpc_reply.find('.//ping-results')
        results_array = []
        for probe_result in tmp.findall('probe-result'):
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip,
                napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
            rtt = round(
                (napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(
                        probe_result, 'rtt'), -1) * 1e-3), 3)
            results_array.append({'ip_address': ip_address,
                                  'rtt': rtt})
        ping_dict['success'].update({'results': results_array})
    else:
        return {'error': 'Packet loss {}'.format(packet_loss)}
    return ping_dict
def get_users(self):
    """Return the locally configured users, keyed by username.

    Each value carries 'level' (Cisco-style privilege mapped from the
    JunOS login class), 'password' and a list of 'sshkeys'.
    """
    users = {}
    # JunOS login class -> Cisco-style privilege level
    _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = {
        'super-user': 15,
        'superuser': 15,
        'operator': 5,
        'read-only': 1,
        'unauthorized': 0
    }
    _DEFAULT_USER_DETAILS = {
        'level': 0,
        'password': '',
        'sshkeys': []
    }
    users_table = junos_views.junos_users_table(self.device)
    users_table.get()
    users_items = users_table.items()
    for user_entry in users_items:
        username = user_entry[0]
        user_details = _DEFAULT_USER_DETAILS.copy()
        user_details.update({
            d[0]: d[1] for d in user_entry[1] if d[1]
        })
        user_class = user_details.pop('class', '')
        user_details = {
            key: py23_compat.text_type(user_details[key])
            for key in user_details.keys()
        }
        level = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0)
        user_details.update({
            'level': level
        })
        # fold whichever key types are present into a single list
        user_details['sshkeys'] = [
            user_details.pop(key)
            for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
            if user_details.get(key, '')
        ]
        users[username] = user_details
    return users
def get_optics(self):
    """Return optical diagnostics per interface and per lane.

    avg/min/max are always reported as 0.0 because the device only
    returns instantaneous readings.
    """
    optics_table = junos_views.junos_intf_optics_table(self.device)
    optics_table.get()
    optics_items = optics_table.items()
    # optics_items has no lane information, so we need to re-format data
    # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
    # but the information for 40G/100G is incorrect at this point
    # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
    optics_items_with_lane = []
    for intf_optic_item in optics_items:
        temp_list = list(intf_optic_item)
        temp_list.insert(1, u"0")
        new_intf_optic_item = tuple(temp_list)
        optics_items_with_lane.append(new_intf_optic_item)
    # Now optics_items_with_lane has all optics with lane 0 included
    # Example: ('xe-0/0/0', u'0', [ optical_values ])

    # Get optical information for 40G/100G optics
    optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
    optics_table40G.get()
    optics_40Gitems = optics_table40G.items()
    # Re-format data as before inserting lane value
    new_optics_40Gitems = []
    for item in optics_40Gitems:
        lane = item[0]
        iface = item[1].pop(0)
        new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
    # New_optics_40Gitems contains 40G/100G optics only:
    # ('et-0/0/49', u'0', [ optical_values ]),
    # ('et-0/0/49', u'1', [ optical_values ]),
    # ('et-0/0/49', u'2', [ optical_values ])

    # Remove 40G/100G optics entries with wrong information returned
    # from junos_intf_optics_table()
    iface_40G = [item[0] for item in new_optics_40Gitems]
    for intf_optic_item in optics_items_with_lane:
        iface_name = intf_optic_item[0]
        if iface_name not in iface_40G:
            new_optics_40Gitems.append(intf_optic_item)
    # New_optics_40Gitems contains all optics 10G/40G/100G with the lane
    optics_detail = {}
    for intf_optic_item in new_optics_40Gitems:
        lane = intf_optic_item[1]
        interface_name = py23_compat.text_type(intf_optic_item[0])
        optics = dict(intf_optic_item[2])
        if interface_name not in optics_detail:
            optics_detail[interface_name] = {}
            optics_detail[interface_name]['physical_channels'] = {}
            optics_detail[interface_name]['physical_channels']['channel'] = []
        # Defaulting avg, min, max values to 0.0 since device does not
        # return these values
        intf_optics = {
            'index': int(lane),
            'state': {
                'input_power': {
                    'instant': (
                        float(optics['input_power'])
                        if optics['input_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'output_power': {
                    'instant': (
                        float(optics['output_power'])
                        if optics['output_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'laser_bias_current': {
                    'instant': (
                        float(optics['laser_bias_current'])
                        if optics['laser_bias_current'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                }
            }
        }
        optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
    return optics_detail
def get_config(self, retrieve='all'):
    """Return the device configuration as text.

    :param retrieve: 'running', 'candidate' or 'all'. JunOS has no
        startup configuration, so 'startup' is always empty.
    """
    result = {
        'startup': '',
        'running': '',
        'candidate': ''
    }

    def _fetch(database):
        # single text-format get-config against the requested DB
        options = {'format': 'text', 'database': database}
        cfg = self.device.rpc.get_config(filter_xml=None, options=options)
        return py23_compat.text_type(cfg.text)

    if retrieve in ('candidate', 'all'):
        result['candidate'] = _fetch('candidate')
    if retrieve in ('running', 'all'):
        result['running'] = _fetch('committed')
    return result
def get_network_instances(self, name=''):
    """Return the network instances (routing instances / VRFs).

    :param name: optional instance name filter; empty returns all.
    :return: OpenConfig-shaped dict of instances, including a synthetic
        'default' instance holding interfaces not bound to any VRF.
    """
    network_instances = {}
    ri_table = junos_views.junos_nw_instances_table(self.device)
    ri_table.get()
    ri_entries = ri_table.items()
    vrf_interfaces = []
    for ri_entry in ri_entries:
        ri_name = py23_compat.text_type(ri_entry[0])
        ri_details = {
            d[0]: d[1] for d in ri_entry[1]
        }
        ri_type = ri_details['instance_type']
        if ri_type is None:
            ri_type = 'default'
        ri_rd = ri_details['route_distinguisher']
        ri_interfaces = ri_details['interfaces']
        if not isinstance(ri_interfaces, list):
            ri_interfaces = [ri_interfaces]  # single interface comes back scalar
        network_instances[ri_name] = {
            'name': ri_name,
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
            'state': {
                'route_distinguisher': ri_rd if ri_rd else ''
            },
            'interfaces': {
                'interface': {
                    intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                }
            }
        }
        vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
    # interfaces not claimed by any instance belong to the default instance
    all_interfaces = self.get_interfaces().keys()
    default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
    if 'default' not in network_instances:
        network_instances['default'] = {
            'name': 'default',
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
            'state': {
                'route_distinguisher': ''
            },
            'interfaces': {
                'interface': {
                    py23_compat.text_type(intrf_name): {}
                    for intrf_name in default_interfaces
                }
            }
        }
    if not name:
        return network_instances
    if name not in network_instances:
        return {}
    return {name: network_instances[name]}
def get_facts(self):
    """Return general device facts (vendor, model, serial, OS version,
    hostname, FQDN, uptime and interface list)."""
    facts = self.device.facts
    iface_table = junos_views.junos_iface_table(self.device)
    iface_table.get()
    return {
        'vendor': u'Juniper',
        'model': py23_compat.text_type(facts['model']),
        'serial_number': py23_compat.text_type(facts['serialnumber']),
        'os_version': py23_compat.text_type(facts['version']),
        'hostname': py23_compat.text_type(facts['hostname']),
        'fqdn': py23_compat.text_type(facts['fqdn']),
        # PyEZ reports uptime as None when unknown; normalise to -1
        'uptime': self.device.uptime or -1,
        'interface_list': iface_table.keys()
    }
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
    """
    Initialise JunOS driver.

    :param hostname: device address to connect to.
    :param username: SSH/NETCONF username.
    :param password: SSH/NETCONF password (unused when key_file is set).
    :param timeout: RPC timeout in seconds (applied after open()).

    Optional args:
        * config_lock (True/False): lock configuration DB after the connection is established.
        * port (int): custom port
        * key_file (string): SSH key file path
        * keepalive (int): Keepalive interval
        * ignore_warning (boolean): not generate warning exceptions
    """
    self.hostname = hostname
    self.username = username
    self.password = password
    self.timeout = timeout
    self.config_replace = False  # toggled by load_replace/load_merge_candidate
    self.locked = False          # True while we hold the config DB lock
    # Get optional arguments
    if optional_args is None:
        optional_args = {}
    self.config_lock = optional_args.get('config_lock', False)
    self.port = optional_args.get('port', 22)
    self.key_file = optional_args.get('key_file', None)
    self.keepalive = optional_args.get('keepalive', 30)
    self.ssh_config_file = optional_args.get('ssh_config_file', None)
    self.ignore_warning = optional_args.get('ignore_warning', False)
    # key-based auth takes a different Device constructor signature
    if self.key_file:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             ssh_private_key_file=self.key_file,
                             ssh_config=self.ssh_config_file,
                             port=self.port)
    else:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             port=self.port,
                             ssh_config=self.ssh_config_file)
    self.profile = ["junos"]
def open(self):
    """Open the connection with the device.

    :raises ConnectionException: when the NETCONF session times out.
    """
    try:
        self.device.open()
    except ConnectTimeoutError as cte:
        raise ConnectionException(cte.message)
    self.device.timeout = self.timeout
    # keep the SSH transport alive across idle periods
    self.device._conn._session.transport.set_keepalive(self.keepalive)
    if hasattr(self.device, "cu"):
        # make sure to remove the cu attr from previous session
        # ValueError: requested attribute name cu already exists
        del self.device.cu
    self.device.bind(cu=Config)
    if self.config_lock:
        self._lock()
def close(self):
    """Close the NETCONF session with the device."""
    if self.config_lock:
        # do not leave the configuration DB locked behind us
        self._unlock()
    self.device.close()
def _lock(self):
    """Acquire an exclusive lock on the candidate configuration DB."""
    if self.locked:
        return  # already holding the lock; locking twice would error
    self.device.cu.lock()
    self.locked = True
def _unlock(self):
    """Release the candidate configuration DB lock, if held."""
    if not self.locked:
        return  # nothing to release
    self.device.cu.unlock()
    self.locked = False
def _rpc(self, get, child=None, **kwargs):
    """
    Construct an arbitrary RPC call to retrieve common stuff. For example:

    Configuration:  get: "<get-configuration/>"
    Interface information: get: "<get-interface-information/>"
    A particular interface information:
          get: "<get-interface-information/>"
          child: "<interface-name>ge-0/0/0</interface-name>"

    :param get: XML string of the RPC tag to execute.
    :param child: optional XML string appended as a child element.
    :return: the RPC reply serialized back to an XML byte string.
    """
    rpc = etree.fromstring(get)
    if child:
        rpc.append(etree.fromstring(child))
    response = self.device.execute(rpc)
    return etree.tostring(response)
def is_alive(self):
    """Report whether both the SSH transport and the NETCONF session are up."""
    # evaluate the state of the underlying SSH connection
    # and also the NETCONF status from PyEZ
    transport_up = self.device._conn._session.transport.is_active()
    return {'is_alive': transport_up and self.device.connected}
@staticmethod
def _is_json_format(config):
try:
_ = json.loads(config) # noqa
except (TypeError, ValueError):
return False
return True
def _detect_config_format(self, config):
fmt = 'text'
set_action_matches = [
'set',
'activate',
'deactivate',
'annotate',
'copy',
'delete',
'insert',
'protect',
'rename',
'unprotect',
]
if config.strip().startswith('<'):
return 'xml'
elif config.strip().split(' ')[0] in set_action_matches:
return 'set'
elif self._is_json_format(config):
return 'json'
return fmt
def _load_candidate(self, filename, config, overwrite):
    """Load configuration from *filename* or *config* into the candidate DB.

    :param filename: path of a config file; takes precedence over config.
    :param config: configuration string used when filename is None.
    :param overwrite: True replaces the whole configuration, False merges.
    :raises ReplaceConfigException: on load error in replace mode.
    :raises MergeConfigException: on load error in merge mode.
    """
    if filename is None:
        configuration = config
    else:
        with open(filename) as f:
            configuration = f.read()
    if not self.config_lock:
        # if not locked during connection time
        # will try to lock it if not already aquired
        self._lock()
        # and the device will be locked till first commit/rollback
    try:
        fmt = self._detect_config_format(configuration)
        if fmt == "xml":
            # PyEZ expects an lxml element for XML payloads
            configuration = etree.XML(configuration)
        self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                            ignore_warning=self.ignore_warning)
    except ConfigLoadError as e:
        if self.config_replace:
            raise ReplaceConfigException(e.errs)
        else:
            raise MergeConfigException(e.errs)
def load_replace_candidate(self, filename=None, config=None):
    """Load a candidate config that fully replaces the running config."""
    self.config_replace = True
    self.device.cu if False else None  # noqa: E701  -- (no-op removed)
    self._load_candidate(filename, config, True)
def load_merge_candidate(self, filename=None, config=None):
    """Load a candidate config that is merged into the running config."""
    self.config_replace = False
    self._load_candidate(filename, config, False)
def compare_config(self):
    """Return the diff between candidate and running config ('' when none)."""
    diff = self.device.cu.diff()
    # PyEZ returns None for an empty diff
    return '' if diff is None else diff.strip()
def commit_config(self):
    """Commit the candidate configuration."""
    self.device.cu.commit(ignore_warning=self.ignore_warning)
    if not self.config_lock:
        # lock was taken implicitly at load time; release it now
        self._unlock()
def discard_config(self):
    """Discard the candidate configuration (rollback 0)."""
    self.device.cu.rollback(rb_id=0)
    if not self.config_lock:
        # lock was taken implicitly at load time; release it now
        self._unlock()
def rollback(self):
    """Restore and commit the previously committed configuration."""
    self.device.cu.rollback(rb_id=1)
    self.commit_config()
def get_interfaces(self):
    """Return interfaces details keyed by interface name.

    Each entry carries is_up/is_enabled/description/last_flapped/
    mac_address/speed; speed defaults to -1 when unparseable and is
    normalised to Mbps (Gbps values are multiplied by 1000).
    """
    result = {}
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    # convert all the tuples to our pre-defined dict structure
    for iface in interfaces.keys():
        result[iface] = {
            'is_up': interfaces[iface]['is_up'],
            'is_enabled': interfaces[iface]['is_enabled'],
            'description': (interfaces[iface]['description'] or u''),
            # -1 signals "never flapped / unknown"
            'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
            'mac_address': napalm_base.helpers.convert(
                napalm_base.helpers.mac,
                interfaces[iface]['mac_address'],
                py23_compat.text_type(interfaces[iface]['mac_address'])),
            'speed': -1
        }
        # Parse e.g. "1000mbps" / "10Gbps" into a numeric Mbps value;
        # leave speed at -1 when the field is absent or unparseable.
        match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
        if match is None:
            continue
        speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
        if speed_value == -1:
            continue
        speed_unit = match.group(2)
        if speed_unit.lower() == 'gbps':
            speed_value *= 1000
        result[iface]['speed'] = speed_value
    return result
def get_interfaces_counters(self):
    """Return per-interface traffic/error counters; missing values become -1."""
    counters_table = junos_views.junos_iface_counter_table(self.device)
    counters_table.get()
    result = {}
    for ifname, counter_fields in counters_table.items():
        entry = {}
        for field_name, field_value in counter_fields:
            entry[field_name] = -1 if field_value is None else field_value
        result[ifname] = entry
    return result
def get_environment(self):
    """Return environment details: power, fans, temperature, cpu, memory."""
    environment = junos_views.junos_enviroment_table(self.device)
    routing_engine = junos_views.junos_routing_engine_table(self.device)
    temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
    power_supplies = junos_views.junos_pem_table(self.device)
    environment.get()
    routing_engine.get()
    temperature_thresholds.get()
    environment_data = {}
    current_class = None
    for sensor_object, object_data in environment.items():
        structured_object_data = {k: v for k, v in object_data}
        if structured_object_data['class']:
            # If current object has a 'class' defined, store it for use
            # on subsequent unlabeled lines.
            current_class = structured_object_data['class']
        else:
            # Juniper doesn't label the 2nd+ lines of a given class with a
            # class name. In that case, we use the most recent class seen.
            structured_object_data['class'] = current_class
        if structured_object_data['class'] == 'Power':
            # Create a dict for the 'power' key
            try:
                environment_data['power'][sensor_object] = {}
            except KeyError:
                environment_data['power'] = {}
                environment_data['power'][sensor_object] = {}
            # -1.0 = unknown; corrected later from the PEM table if available
            environment_data['power'][sensor_object]['capacity'] = -1.0
            environment_data['power'][sensor_object]['output'] = -1.0
        if structured_object_data['class'] == 'Fans':
            # Create a dict for the 'fans' key
            try:
                environment_data['fans'][sensor_object] = {}
            except KeyError:
                environment_data['fans'] = {}
                environment_data['fans'][sensor_object] = {}
        status = structured_object_data['status']
        env_class = structured_object_data['class']
        if (status == 'OK' and env_class == 'Power'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['power'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Power'):
            environment_data['power'][sensor_object]['status'] = False
        elif (status == 'OK' and env_class == 'Fans'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['fans'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Fans'):
            environment_data['fans'][sensor_object]['status'] = False
        # NOTE(review): this loop iterates every threshold entry for every
        # sensor, but only the outer sensor's own data is used — it looks
        # redundant for non-Temp sensors; confirm before simplifying.
        for temperature_object, temperature_data in temperature_thresholds.items():
            structured_temperature_data = {k: v for k, v in temperature_data}
            if structured_object_data['class'] == 'Temp':
                # Create a dict for the 'temperature' key
                try:
                    environment_data['temperature'][sensor_object] = {}
                except KeyError:
                    environment_data['temperature'] = {}
                    environment_data['temperature'][sensor_object] = {}
                # Check we have a temperature field in this class (See #66)
                if structured_object_data['temperature']:
                    environment_data['temperature'][sensor_object]['temperature'] = \
                        float(structured_object_data['temperature'])
                # Set a default value (False) to the key is_critical and is_alert
                environment_data['temperature'][sensor_object]['is_alert'] = False
                environment_data['temperature'][sensor_object]['is_critical'] = False
                # Check if the working temperature is equal to or higher than alerting threshold
                temp = structured_object_data['temperature']
                if structured_temperature_data['red-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_critical'] = True
                    environment_data['temperature'][sensor_object]['is_alert'] = True
                elif structured_temperature_data['yellow-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_alert'] = True
    # Try to correct Power Supply information
    pem_table = dict()
    try:
        power_supplies.get()
    except RpcError:
        # Not all platforms have support for this
        pass
    else:
        # Format PEM information and correct capacity and output values
        for pem in power_supplies.items():
            pem_name = pem[0].replace("PEM", "Power Supply")
            pem_table[pem_name] = dict(pem[1])
            environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
            environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
    for routing_engine_object, routing_engine_data in routing_engine.items():
        structured_routing_engine_data = {k: v for k, v in routing_engine_data}
        # Create dicts for 'cpu' and 'memory'.
        try:
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        except KeyError:
            environment_data['cpu'] = {}
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        # Calculate the CPU usage by using the CPU idle value.
        environment_data['cpu'][routing_engine_object]['%usage'] = \
            100.0 - structured_routing_engine_data['cpu-idle']
        try:
            environment_data['memory']['available_ram'] = \
                int(structured_routing_engine_data['memory-dram-size'])
        except ValueError:
            # some platforms report e.g. "2048 MB" — keep only the digits
            environment_data['memory']['available_ram'] = \
                int(
                    ''.join(
                        i for i in structured_routing_engine_data['memory-dram-size']
                        if i.isdigit()
                    )
                )
        # Junos gives us RAM in %, so calculation has to be made.
        # Sadly, because of this, results are not 100% accurate to the truth.
        environment_data['memory']['used_ram'] = \
            int(round(environment_data['memory']['available_ram'] / 100.0 *
                      structured_routing_engine_data['memory-buffer-utilization']))
    return environment_data
@staticmethod
def _get_address_family(table):
"""
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
"""
address_family_mapping = {
'inet': 'ipv4',
'inet6': 'ipv6',
'inetflow': 'flow'
}
family = table.split('.')[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = family
return address_family
def _parse_route_stats(self, neighbor):
    # Build per-address-family prefix statistics for one BGP neighbor.
    # -1 means "unknown" and is what down sessions report.
    data = {
        'ipv4': {
            'received_prefixes': -1,
            'accepted_prefixes': -1,
            'sent_prefixes': -1
        },
        'ipv6': {
            'received_prefixes': -1,
            'accepted_prefixes': -1,
            'sent_prefixes': -1
        }
    }
    if not neighbor['is_up']:
        return data
    elif isinstance(neighbor['tables'], list):
        if isinstance(neighbor['sent_prefixes'], int):
            # We expect sent_prefixes to be a list, but sometimes it
            # is of type int. Therefore convert attribute to list
            neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
        for idx, table in enumerate(neighbor['tables']):
            family = self._get_address_family(table)
            data[family] = {}
            data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
            data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
            if 'in sync' in neighbor['send-state'][idx]:
                # sent counters are only present for RIBs that are in sync,
                # so consume them from the front of the list in order
                data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
            else:
                data[family]['sent_prefixes'] = 0
    else:
        # single table: the per-table fields are scalars, not lists
        family = self._get_address_family(neighbor['tables'])
        data[family] = {}
        data[family]['received_prefixes'] = neighbor['received_prefixes']
        data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
        data[family]['sent_prefixes'] = neighbor['sent_prefixes']
    return data
@staticmethod
def _parse_value(value):
    """Normalise a raw table value: strings become unicode text, None becomes ''."""
    if value is None:
        return u''
    if isinstance(value, py23_compat.string_types):
        return py23_compat.text_type(value)
    return value
def get_bgp_neighbors(self):
    """Return BGP neighbors details, grouped by routing instance.

    Only instances that actually have peers are included in the result.
    """
    bgp_neighbor_data = {}
    default_neighbor_details = {
        'local_as': 0,
        'remote_as': 0,
        'remote_id': '',
        'is_up': False,
        'is_enabled': False,
        'description': '',
        'uptime': 0,
        'address_family': {}
    }
    keys = default_neighbor_details.keys()
    uptime_table = junos_views.junos_bgp_uptime_table(self.device)
    bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
    uptime_table_lookup = {}

    def _get_uptime_table(instance):
        # memoise the per-instance uptime RPC so it is issued at most once
        if instance not in uptime_table_lookup:
            uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
        return uptime_table_lookup[instance]

    def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
        '''
        Make sure to execute a simple request whenever using
        junos > 13. This is a helper used to avoid code redundancy
        and reuse the function also when iterating through the list
        BGP neighbors under a specific routing instance,
        also when the device is capable to return the routing
        instance name at the BGP neighbor level.
        '''
        for bgp_neighbor in neighbor_data:
            peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # not instance, means newer Junos version,
                # as we request everything in a single request
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                # instance is explicitly requests,
                # thus it's an old Junos, so we retrieve the BGP neighbors
                # under a certain routing instance
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            if instance_name not in bgp_neighbor_data:
                bgp_neighbor_data[instance_name] = {}
            if 'router_id' not in bgp_neighbor_data[instance_name]:
                # we only need to set this once
                bgp_neighbor_data[instance_name]['router_id'] = \
                    py23_compat.text_type(neighbor_details.get('local_id', ''))
            peer = {
                key: self._parse_value(value)
                for key, value in neighbor_details.items()
                if key in keys
            }
            peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
            peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
            peer['address_family'] = self._parse_route_stats(neighbor_details)
            if 'peers' not in bgp_neighbor_data[instance_name]:
                bgp_neighbor_data[instance_name]['peers'] = {}
            bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
            if not uptime_table_items:
                uptime_table_items = _get_uptime_table(instance)
            for neighbor, uptime in uptime_table_items:
                if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                    bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]

    # Commenting out the following sections, till Junos
    # will provide a way to identify the routing instance name
    # from the details of the BGP neighbor
    # currently, there are Junos 15 version having a field called `peer_fwd_rti`
    # but unfortunately, this is not consistent.
    # Junos 17 might have this fixed, but this needs to be revisited later.
    # In the definition below, `old_junos` means a version that does not provide
    # the forwarding RTI information.
    #
    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device).get()
    for instance, instance_data in instances.items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        bgp_neighbor_data[instance] = {'peers': {}}
        instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
        uptime_table_items = uptime_table.get(instance=instance).items()
        _get_bgp_neighbors_core(instance_neighbors,
                                instance=instance,
                                uptime_table_items=uptime_table_items)
    # If the OS provides the `peer_fwd_rti` or any way to identify the
    # rotuing instance name (see above), the performances of this getter
    # can be significantly improved, as we won't execute one request
    # for each an every RT.
    # However, this improvement would only be beneficial for multi-VRF envs.
    #
    # else:
    #     instance_neighbors = bgp_neighbors_table.get().items()
    #     _get_bgp_neighbors_core(instance_neighbors)
    # drop instances that ended up with no peers
    bgp_tmp_dict = {}
    for k, v in bgp_neighbor_data.items():
        if bgp_neighbor_data[k]['peers']:
            bgp_tmp_dict[k] = v
    return bgp_tmp_dict
def get_lldp_neighbors(self):
    """Return LLDP neighbor summaries keyed by local interface name."""
    lldp = junos_views.junos_lldp_table(self.device)
    try:
        lldp.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    neighbors = {}
    for local_iface, neighbor_fields in lldp.items():
        entry = {field: py23_compat.text_type(value)
                 for field, value in neighbor_fields}
        neighbors.setdefault(local_iface, []).append(entry)
    return neighbors
def get_lldp_neighbors_detail(self, interface=''):
    """Detailed view of the LLDP neighbors, keyed by local interface.

    :param interface: accepted for interface-filtering API compatibility,
        but NOTE(review): the loop below rebinds this name, so all
        interfaces are always returned — confirm intended behavior.
    """
    lldp_neighbors = {}
    lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
    try:
        lldp_table.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    # Reuse the table populated by the get() above instead of issuing a
    # second, redundant RPC just to read the interface names.
    interfaces = lldp_table.keys()
    # get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
    # and SRX Series is get-lldp-interface-neighbors-information,
    # and rpc for M, MX, and T Series is get-lldp-interface-neighbors
    # ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
    # ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
    lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
    # the personality fact is constant per-connection, so look it up once
    switch_style = self.device.facts.get('personality') not in ('MX', 'M', 'T')
    if switch_style:
        lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
    for interface in interfaces:
        if switch_style:
            lldp_table.get(interface_name=interface)
        else:
            lldp_table.get(interface_device=interface)
        for item in lldp_table:
            if interface not in lldp_neighbors.keys():
                lldp_neighbors[interface] = []
            lldp_neighbors[interface].append({
                'parent_interface': item.parent_interface,
                'remote_port': item.remote_port,
                'remote_chassis_id': napalm_base.helpers.convert(
                    napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
                'remote_port_description': napalm_base.helpers.convert(
                    py23_compat.text_type, item.remote_port_description),
                'remote_system_name': item.remote_system_name,
                'remote_system_description': item.remote_system_description,
                'remote_system_capab': item.remote_system_capab,
                'remote_system_enable_capab': item.remote_system_enable_capab
            })
    return lldp_neighbors
def cli(self, commands):
    """Execute raw CLI commands and return their output keyed by command.

    Pipe modifiers unsupported by the device are emulated locally by the
    helper functions below; the 'save' pipe is stripped for safety.

    :param commands: list of CLI command strings.
    :raises TypeError: if ``commands`` is not a list.
    """
    cli_output = {}

    def _count(txt, none):  # Second arg for consistency only. noqa
        '''
        Return the exact output, as Junos displays
        e.g.:
        > show system processes extensive | match root | count
        Count: 113 lines
        '''
        count = len(txt.splitlines())
        return 'Count: {count} lines'.format(count=count)

    def _trim(txt, length):
        '''
        Trim specified number of columns from start of line.
        '''
        try:
            newlines = []
            for line in txt.splitlines():
                newlines.append(line[int(length):])
            return '\n'.join(newlines)
        except ValueError:
            return txt

    def _except(txt, pattern):
        '''
        Show only text that does not match a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        unmatched = [
            line for line in txt.splitlines()
            if not re.search(rgx, line, re.I)
        ]
        return '\n'.join(unmatched)

    def _last(txt, length):
        '''
        Display end of output only.
        '''
        try:
            return '\n'.join(
                txt.splitlines()[(-1)*int(length):]
            )
        except ValueError:
            return txt

    def _match(txt, pattern):
        '''
        Show only text that matches a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        matched = [
            line for line in txt.splitlines()
            if re.search(rgx, line, re.I)
        ]
        return '\n'.join(matched)

    def _find(txt, pattern):
        '''
        Search for first occurrence of pattern.
        '''
        rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
        match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
        if match:
            return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
        else:
            return '\nPattern not found'

    def _process_pipe(cmd, txt):
        '''
        Process CLI output from Juniper device that
        doesn't allow piping the output.
        '''
        # NOTE(review): this returns immediately whenever the device gave
        # output, so the local pipe emulation below only runs when txt is
        # None — and would then fail on None; confirm intended behavior.
        if txt is not None:
            return txt
        _OF_MAP = OrderedDict()
        _OF_MAP['except'] = _except
        _OF_MAP['match'] = _match
        _OF_MAP['last'] = _last
        _OF_MAP['trim'] = _trim
        _OF_MAP['count'] = _count
        _OF_MAP['find'] = _find
        # the operations order matter in this case!
        exploded_cmd = cmd.split('|')
        pipe_oper_args = {}
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            pipe_args = ''.join(exploded_pipe[1:2])
            # will not throw error when there's no arg
            pipe_oper_args[pipe_oper] = pipe_args
        for oper in _OF_MAP.keys():
            # to make sure the operation sequence is correct
            if oper not in pipe_oper_args.keys():
                continue
            txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
        return txt

    if not isinstance(commands, list):
        raise TypeError('Please enter a valid list of commands!')
    _PIPE_BLACKLIST = ['save']
    # Preprocessing to avoid forbidden commands
    for command in commands:
        exploded_cmd = command.split('|')
        command_safe_parts = []
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            if pipe_oper in _PIPE_BLACKLIST:
                continue
            pipe_args = ''.join(exploded_pipe[1:2])
            safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                              args=pipe_args)
            command_safe_parts.append(safe_pipe)
        safe_command = exploded_cmd[0] if not command_safe_parts else\
            '{base} | {pipes}'.format(base=exploded_cmd[0],
                                      pipes=' | '.join(command_safe_parts))
        raw_txt = self.device.cli(safe_command, warning=False)
        cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
            _process_pipe(command, raw_txt))
    return cli_output
def get_bgp_config(self, group='', neighbor=''):
    """Return BGP configuration, optionally filtered by group and neighbor.

    :param group: restrict the output to a single BGP group.
    :param neighbor: restrict the output to one neighbor within ``group``
        (ignored when ``group`` is not given).
    """
    def update_dict(d, u):  # for deep dictionary update
        for k, v in u.items():
            if isinstance(d, collections.Mapping):
                if isinstance(v, collections.Mapping):
                    r = update_dict(d.get(k, {}), v)
                    d[k] = r
                else:
                    d[k] = u[k]
            else:
                d = {k: u[k]}
        return d

    def build_prefix_limit(**args):
        """
        Transform the lements of a dictionary into nested dictionaries.

        Example:
            {
                'inet_unicast_limit': 500,
                'inet_unicast_teardown_threshold': 95,
                'inet_unicast_teardown_timeout': 5
            }
        becomes:
            {
                'inet': {
                    'unicast': {
                        'limit': 500,
                        'teardown': {
                            'threshold': 95,
                            'timeout': 5
                        }
                    }
                }
            }
        """
        prefix_limit = {}
        for key, value in args.items():
            key_levels = key.split('_')
            length = len(key_levels)-1
            temp_dict = {
                key_levels[length]: value
            }
            for index in reversed(range(length)):
                level = key_levels[index]
                temp_dict = {level: temp_dict}
            update_dict(prefix_limit, temp_dict)
        return prefix_limit

    # datatype maps drive both the default values and the type coercion below
    _COMMON_FIELDS_DATATYPE_ = {
        'description': py23_compat.text_type,
        'local_address': py23_compat.text_type,
        'local_as': int,
        'remote_as': int,
        'import_policy': py23_compat.text_type,
        'export_policy': py23_compat.text_type,
        'inet_unicast_limit_prefix_limit': int,
        'inet_unicast_teardown_threshold_prefix_limit': int,
        'inet_unicast_teardown_timeout_prefix_limit': int,
        'inet_unicast_novalidate_prefix_limit': int,
        'inet_flow_limit_prefix_limit': int,
        'inet_flow_teardown_threshold_prefix_limit': int,
        'inet_flow_teardown_timeout_prefix_limit': int,
        'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
        'inet6_unicast_limit_prefix_limit': int,
        'inet6_unicast_teardown_threshold_prefix_limit': int,
        'inet6_unicast_teardown_timeout_prefix_limit': int,
        'inet6_unicast_novalidate_prefix_limit': int,
        'inet6_flow_limit_prefix_limit': int,
        'inet6_flow_teardown_threshold_prefix_limit': int,
        'inet6_flow_teardown_timeout_prefix_limit': int,
        'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
    }

    _PEER_FIELDS_DATATYPE_MAP_ = {
        'authentication_key': py23_compat.text_type,
        'route_reflector_client': bool,
        'nhs': bool
    }
    _PEER_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )

    _GROUP_FIELDS_DATATYPE_MAP_ = {
        'type': py23_compat.text_type,
        'apply_groups': list,
        'remove_private_as': bool,
        'multipath': bool,
        'multihop_ttl': int
    }
    _GROUP_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )

    _DATATYPE_DEFAULT_ = {
        py23_compat.text_type: '',
        int: 0,
        bool: False,
        list: []
    }

    bgp_config = {}

    if group:
        bgp = junos_views.junos_bgp_config_group_table(self.device)
        bgp.get(group=group)
    else:
        bgp = junos_views.junos_bgp_config_table(self.device)
        bgp.get()
        neighbor = ''  # if no group is set, no neighbor should be set either
    bgp_items = bgp.items()

    if neighbor:
        neighbor_ip = napalm_base.helpers.ip(neighbor)

    for bgp_group in bgp_items:
        bgp_group_name = bgp_group[0]
        bgp_group_details = bgp_group[1]
        bgp_config[bgp_group_name] = {
            field: _DATATYPE_DEFAULT_.get(datatype)
            for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
            if '_prefix_limit' not in field
        }
        for elem in bgp_group_details:
            if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                continue
            datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
            default = _DATATYPE_DEFAULT_.get(datatype)
            key = elem[0]
            value = elem[1]
            if key in ['export_policy', 'import_policy']:
                if isinstance(value, list):
                    value = ' '.join(value)
            if key == 'local_address':
                value = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, value, value)
            if key == 'neighbors':
                # stash the neighbors sub-table; it is expanded below
                bgp_group_peers = value
                continue
            bgp_config[bgp_group_name].update({
                key: napalm_base.helpers.convert(datatype, value, default)
            })
        prefix_limit_fields = {}
        for elem in bgp_group_details:
            if '_prefix_limit' in elem[0] and elem[1] is not None:
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                prefix_limit_fields.update({
                    elem[0].replace('_prefix_limit', ''):
                        napalm_base.helpers.convert(datatype, elem[1], default)
                })
        bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
        if 'multihop' in bgp_config[bgp_group_name].keys():
            # Delete 'multihop' key from the output
            del bgp_config[bgp_group_name]['multihop']
            if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                # Set ttl to default value 64
                bgp_config[bgp_group_name]['multihop_ttl'] = 64
        bgp_config[bgp_group_name]['neighbors'] = {}
        for bgp_group_neighbor in bgp_group_peers.items():
            bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
            # NOTE(review): this compares against the raw `neighbor` arg while
            # the break below compares against the normalised `neighbor_ip`;
            # confirm both are intended.
            if neighbor and bgp_peer_address != neighbor:
                continue  # if filters applied, jump over all other neighbors
            bgp_group_details = bgp_group_neighbor[1]
            bgp_peer_details = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                bgp_peer_details.update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
                bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['local_as'])
                bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['remote_as'])
                if key == 'cluster':
                    bgp_peer_details['route_reflector_client'] = True
                    # we do not want cluster in the output
                    del bgp_peer_details['cluster']
            if 'cluster' in bgp_config[bgp_group_name].keys():
                bgp_peer_details['route_reflector_client'] = True
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
            if neighbor and bgp_peer_address == neighbor_ip:
                break  # found the desired neighbor
        if 'cluster' in bgp_config[bgp_group_name].keys():
            # we do not want cluster in the output
            del bgp_config[bgp_group_name]['cluster']
    return bgp_config
def get_bgp_neighbors_detail(self, neighbor_address=''):
    """Detailed view of the BGP neighbors operational data.

    Result is keyed by routing instance, then by remote AS; each value is
    a list of neighbor detail dictionaries.

    :param neighbor_address: optionally restrict to a single neighbor.
    """
    bgp_neighbors = {}
    default_neighbor_details = {
        'up': False,
        'local_as': 0,
        'remote_as': 0,
        'router_id': u'',
        'local_address': u'',
        'routing_table': u'',
        'local_address_configured': False,
        'local_port': 0,
        'remote_address': u'',
        'remote_port': 0,
        'multihop': False,
        'multipath': False,
        'remove_private_as': False,
        'import_policy': u'',
        'export_policy': u'',
        'input_messages': -1,
        'output_messages': -1,
        'input_updates': -1,
        'output_updates': -1,
        'messages_queued_out': -1,
        'connection_state': u'',
        'previous_connection_state': u'',
        'last_event': u'',
        'suppress_4byte_as': False,
        'local_as_prepend': False,
        'holdtime': 0,
        'configured_holdtime': 0,
        'keepalive': 0,
        'configured_keepalive': 0,
        'active_prefix_count': -1,
        'received_prefix_count': -1,
        'accepted_prefix_count': -1,
        'suppressed_prefix_count': -1,
        'advertised_prefix_count': -1,
        'flap_count': 0
    }
    # maps Junos BGP option tokens to the boolean flags of the napalm schema
    OPTION_KEY_MAP = {
        'RemovePrivateAS': 'remove_private_as',
        'Multipath': 'multipath',
        'Multihop': 'multihop',
        'AddressFamily': 'local_address_configured'
        # 'AuthKey' : 'authentication_key_set'
        # but other vendors do not specify if auth key is set
        # other options:
        # Preference, HoldTime, Ttl, LogUpDown, Refresh
    }

    def _bgp_iter_core(neighbor_data, instance=None):
        '''
        Iterate over a list of neighbors.
        For older junos, the routing instance is not specified inside the
        BGP neighbors XML, therefore we need to use a super sub-optimal structure
        as in get_bgp_neighbors: iterate through the list of network instances
        then execute one request for each and every routing instance.
        For newer junos, this is not necessary as the routing instance is available
        and we can get everything solve in a single request.
        '''
        for bgp_neighbor in neighbor_data:
            remote_as = int(bgp_neighbor[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # newer Junos exposes the forwarding routing instance per peer
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            options = neighbor_details.pop('options', '')
            if isinstance(options, str):
                options_list = options.split()
                for option in options_list:
                    key = OPTION_KEY_MAP.get(option)
                    if key is not None:
                        neighbor_details[key] = True
            four_byte_as = neighbor_details.pop('4byte_as', 0)
            # addresses come as "ip+port"; port defaults to 179 when absent
            local_address = neighbor_details.pop('local_address', '')
            local_details = local_address.split('+')
            neighbor_details['local_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, local_details[0], local_details[0])
            if len(local_details) == 2:
                neighbor_details['local_port'] = int(local_details[1])
            else:
                neighbor_details['local_port'] = 179
            neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
            peer_address = neighbor_details.pop('peer_address', '')
            remote_details = peer_address.split('+')
            neighbor_details['remote_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, remote_details[0], remote_details[0])
            if len(remote_details) == 2:
                neighbor_details['remote_port'] = int(remote_details[1])
            else:
                neighbor_details['remote_port'] = 179
            neighbor_details['routing_table'] = instance_name
            neighbor_details['local_as'] = napalm_base.helpers.as_number(
                neighbor_details['local_as'])
            neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                neighbor_details['remote_as'])
            neighbors_rib = neighbor_details.pop('rib')
            neighbors_queue = neighbor_details.pop('queue')
            messages_queued_out = 0
            for queue_entry in neighbors_queue.items():
                messages_queued_out += queue_entry[1][0][1]
            neighbor_details['messages_queued_out'] = messages_queued_out
            if instance_name not in bgp_neighbors.keys():
                bgp_neighbors[instance_name] = {}
            if remote_as not in bgp_neighbors[instance_name].keys():
                bgp_neighbors[instance_name][remote_as] = []
            neighbor_rib_stats = neighbors_rib.items()
            if not neighbor_rib_stats:
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                continue  # no RIBs available, pass default details
            neighbor_rib_details = {
                'active_prefix_count': 0,
                'received_prefix_count': 0,
                'accepted_prefix_count': 0,
                'suppressed_prefix_count': 0,
                'advertised_prefix_count': 0
            }
            # sum the prefix counters across all of the neighbor's RIBs
            for rib_entry in neighbor_rib_stats:
                for elem in rib_entry[1]:
                    if elem[1] is None:
                        neighbor_rib_details[elem[0]] += 0
                    else:
                        neighbor_rib_details[elem[0]] += elem[1]
            neighbor_details.update(neighbor_rib_details)
            bgp_neighbors[instance_name][remote_as].append(neighbor_details)

    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device)
    for instance, instance_data in instances.get().items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                neighbor_address=str(neighbor_address)).items()
        _bgp_iter_core(neighbor_data, instance=instance)
    # else:
    #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
    #     _bgp_iter_core(neighbor_data)
    return bgp_neighbors
def get_arp_table(self):
    """Return the ARP table as a list of entry dictionaries.

    A custom junos view is used (rather than jnpr's stock ArpTable)
    because we need filters, grouping by VLAN ID, and the hostname/TTE
    fields as well.
    """
    raw_table = junos_views.junos_arp_table(self.device)
    raw_table.get()
    arp_table = []
    for _, entry_fields in raw_table.items():
        entry = dict(entry_fields)
        entry['mac'] = napalm_base.helpers.mac(entry.get('mac'))
        entry['ip'] = napalm_base.helpers.ip(entry.get('ip'))
        arp_table.append(entry)
    return arp_table
def get_ntp_peers(self):
    """Return the NTP peers configured on the device, keyed by address."""
    ntp_table = junos_views.junos_ntp_peers_config_table(self.device)
    ntp_table.get()
    peers = {}
    for peer_address, _ in ntp_table.items():
        peers[napalm_base.helpers.ip(peer_address)] = {}
    return peers
def get_ntp_servers(self):
    """Return the NTP servers configured on the device, keyed by address."""
    ntp_table = junos_views.junos_ntp_servers_config_table(self.device)
    ntp_table.get()
    servers = {}
    for server_address, _ in ntp_table.items():
        servers[napalm_base.helpers.ip(server_address)] = {}
    return servers
def get_ntp_stats(self):
    """Return NTP stats (associations) as a list of dictionaries."""
    # NTP Peers does not have XML RPC defined
    # thus we need to retrieve raw text and parse...
    # :(
    ntp_stats = []
    # matches one row of `show ntp associations no-resolve`:
    # tally char, remote, refid, stratum, type, when, poll, reach,
    # delay, offset, jitter
    REGEX = (
        '^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
        '\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
        '\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
        '\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
        '\s+([0-9\.]+)\s?$'
    )
    ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
    ntp_assoc_output_lines = ntp_assoc_output.splitlines()
    for ntp_assoc_output_line in ntp_assoc_output_lines[3:]:  # skip the header lines
        line_search = re.search(REGEX, ntp_assoc_output_line, re.I)
        if not line_search:
            continue  # pattern not found
        line_groups = line_search.groups()
        try:
            ntp_stats.append({
                'remote': napalm_base.helpers.ip(line_groups[1]),
                # '*' marks the peer the clock is synchronized to
                'synchronized': (line_groups[0] == '*'),
                'referenceid': py23_compat.text_type(line_groups[2]),
                'stratum': int(line_groups[3]),
                'type': py23_compat.text_type(line_groups[4]),
                'when': py23_compat.text_type(line_groups[5]),
                'hostpoll': int(line_groups[6]),
                'reachability': int(line_groups[7]),
                'delay': float(line_groups[8]),
                'offset': float(line_groups[9]),
                'jitter': float(line_groups[10])
            })
        except Exception:
            continue  # jump to next line
    return ntp_stats
def get_interfaces_ip(self):
"""Return the configured IP addresses.

Result shape: {interface: {'ipv4'|'ipv6': {address: {'prefix_length': int}}}}.
"""
interfaces_ip = {}
interface_table = junos_views.junos_ip_interfaces_table(self.device)
interface_table.get()
interface_table_items = interface_table.items()
# map JunOS family names to the NAPALM vocabulary
_FAMILY_VMAP_ = {
'inet': u'ipv4',
'inet6': u'ipv6'
# can add more mappings
}
_FAMILY_MAX_PREFIXLEN = {
'inet': 32,
'inet6': 128
}
for interface_details in interface_table_items:
# the table key is the address, possibly in CIDR notation
ip_network = interface_details[0]
ip_address = ip_network.split('/')[0]
address = napalm_base.helpers.convert(
napalm_base.helpers.ip, ip_address, ip_address)
try:
interface_details_dict = dict(interface_details[1])
family_raw = interface_details_dict.get('family')
interface = py23_compat.text_type(interface_details_dict.get('interface'))
except ValueError:
continue
# no '/mask' present -> default to the family's host-route length
prefix = napalm_base.helpers.convert(int,
ip_network.split('/')[-1],
_FAMILY_MAX_PREFIXLEN.get(family_raw))
family = _FAMILY_VMAP_.get(family_raw)
if not family or not interface:
continue
if interface not in interfaces_ip.keys():
interfaces_ip[interface] = {}
if family not in interfaces_ip[interface].keys():
interfaces_ip[interface][family] = {}
if address not in interfaces_ip[interface][family].keys():
interfaces_ip[interface][family][address] = {}
interfaces_ip[interface][family][address][u'prefix_length'] = prefix
return interfaces_ip
def get_mac_address_table(self):
"""Return the MAC address table."""
mac_address_table = []
# pick the table/view matching the platform personality
if self.device.facts.get('personality', '') in ['SWITCH']: # for EX & QFX devices
if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']: # for L2NG devices
mac_table = junos_views.junos_mac_address_table_switch_l2ng(self.device)
else:
mac_table = junos_views.junos_mac_address_table_switch(self.device)
else:
mac_table = junos_views.junos_mac_address_table(self.device)
mac_table.get()
mac_table_items = mac_table.items()
# defaults so every entry carries the full NAPALM schema
default_values = {
'mac': u'',
'interface': u'',
'vlan': 0,
'static': False,
'active': True,
'moves': 0,
'last_move': 0.0
}
for mac_table_entry in mac_table_items:
mac_entry = default_values.copy()
mac_entry.update(
{elem[0]: elem[1] for elem in mac_table_entry[1]}
)
mac = mac_entry.get('mac')
# JUNOS returns '*' for Type = Flood
if mac == '*':
continue
mac_entry['mac'] = napalm_base.helpers.mac(mac)
mac_address_table.append(mac_entry)
return mac_address_table
def get_route_to(self, destination='', protocol=''):
"""Return route details to a specific destination, learned from a certain protocol.

:param destination: prefix (or host) to look up; required.
:param protocol: optional protocol filter (e.g. 'bgp', 'isis', 'connected').
:raises TypeError: when destination is not a string.
:raises CommandTimeoutException: when the prefix matches too many routes.
:raises CommandErrorException: on RPC errors (e.g. unknown protocol).
"""
routes = {}
if not isinstance(destination, py23_compat.string_types):
raise TypeError('Please specify a valid destination!')
# NOTE(review): this condition re-checks *destination*'s type; presumably it
# was meant to validate *protocol* -- confirm before changing.
if protocol and isinstance(destination, py23_compat.string_types):
protocol = protocol.lower()
if protocol == 'connected':
protocol = 'direct' # this is how is called on JunOS
_COMMON_PROTOCOL_FIELDS_ = [
'destination',
'prefix_length',
'protocol',
'current_active',
'last_active',
'age',
'next_hop',
'outgoing_interface',
'selected_next_hop',
'preference',
'inactive_reason',
'routing_table'
] # identifies the list of fields common for all protocols
_BOOLEAN_FIELDS_ = [
'current_active',
'selected_next_hop',
'last_active'
] # fields expected to have boolean values
_PROTOCOL_SPECIFIC_FIELDS_ = {
'bgp': [
'local_as',
'remote_as',
'as_path',
'communities',
'local_preference',
'preference2',
'remote_address',
'metric',
'metric2'
],
'isis': [
'level',
'metric',
'local_as'
]
}
routes_table = junos_views.junos_protocol_route_table(self.device)
rt_kargs = {
'destination': destination
}
if protocol and isinstance(destination, py23_compat.string_types):
rt_kargs['protocol'] = protocol
try:
routes_table.get(**rt_kargs)
except RpcTimeoutError:
# on devices with milions of routes
# in case the destination is too generic (e.g.: 10/8)
# will take very very long to determine all routes and
# moreover will return a huge list
raise CommandTimeoutException(
'Too many routes returned! Please try with a longer prefix or a specific protocol!'
)
except RpcError as rpce:
if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
raise CommandErrorException(
'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
raise CommandErrorException(rpce)
except Exception as err:
raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))
routes_items = routes_table.items()
for route in routes_items:
d = {}
# next_hop = route[0]
d = {elem[0]: elem[1] for elem in route[1]}
destination = napalm_base.helpers.ip(d.pop('destination', ''))
prefix_length = d.pop('prefix_length', 32)
destination = '{d}/{p}'.format(
d=destination,
p=prefix_length
)
# unset boolean flags default to False
d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
as_path = d.get('as_path')
if as_path is not None:
d['as_path'] = as_path.split(' I ')[0]\
.replace('AS path:', '')\
.replace('I', '')\
.strip()
# to be sure that contains only AS Numbers
if d.get('inactive_reason') is None:
d['inactive_reason'] = u''
route_protocol = d.get('protocol').lower()
if protocol and protocol != route_protocol:
continue
communities = d.get('communities')
if communities is not None and type(communities) is not list:
d['communities'] = [communities]
d_keys = list(d.keys())
# fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
all_protocol_attributes = {
key: d.pop(key)
for key in d_keys
if key not in _COMMON_PROTOCOL_FIELDS_
}
protocol_attributes = {
key: value for key, value in all_protocol_attributes.items()
if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
}
d['protocol_attributes'] = protocol_attributes
if destination not in routes.keys():
routes[destination] = []
routes[destination].append(d)
return routes
def get_snmp_information(self):
"""Return the SNMP configuration.

Includes global SNMP settings plus a 'community' dict keyed by community
name, each with its ACL and authorization mode.
"""
snmp_information = {}
snmp_config = junos_views.junos_snmp_config_table(self.device)
snmp_config.get()
snmp_items = snmp_config.items()
if not snmp_items:
return snmp_information
# flatten the first (only) table entry into the result dict
snmp_information = {
py23_compat.text_type(ele[0]): ele[1] if ele[1] else ''
for ele in snmp_items[0][1]
}
snmp_information['community'] = {}
communities_table = snmp_information.pop('communities_table')
if not communities_table:
return snmp_information
for community in communities_table.items():
community_name = py23_compat.text_type(community[0])
community_details = {
'acl': ''
}
# translate the JunOS authorization mode to the NAPALM vocabulary
community_details.update({
py23_compat.text_type(ele[0]): py23_compat.text_type(
ele[1] if ele[0] != 'mode'
else C.SNMP_AUTHORIZATION_MODE_MAP.get(ele[1]))
for ele in community[1]
})
snmp_information['community'][community_name] = community_details
return snmp_information
def get_probes_config(self):
"""Return the configuration of the RPM probes.

Result shape: {probe_name: {test_name: {probe_type, target, source,
probe_count, test_interval}}}.
"""
probes = {}
probes_table = junos_views.junos_rpm_probes_config_table(self.device)
probes_table.get()
probes_table_items = probes_table.items()
for probe_test in probes_table_items:
test_name = py23_compat.text_type(probe_test[0])
test_details = {
p[0]: p[1] for p in probe_test[1]
}
probe_name = napalm_base.helpers.convert(
py23_compat.text_type, test_details.pop('probe_name'))
target = napalm_base.helpers.convert(
py23_compat.text_type, test_details.pop('target', ''))
test_interval = napalm_base.helpers.convert(int, test_details.pop('test_interval', '0'))
probe_count = napalm_base.helpers.convert(int, test_details.pop('probe_count', '0'))
probe_type = napalm_base.helpers.convert(
py23_compat.text_type, test_details.pop('probe_type', ''))
source = napalm_base.helpers.convert(
py23_compat.text_type, test_details.pop('source_address', ''))
if probe_name not in probes.keys():
probes[probe_name] = {}
probes[probe_name][test_name] = {
'probe_type': probe_type,
'target': target,
'source': source,
'probe_count': probe_count,
'test_interval': test_interval
}
return probes
def get_probes_results(self):
"""Return the results of the RPM probes.

Result shape: {probe_name: {test_name: {metric: value}}}; timing metrics
are converted from microseconds to milliseconds.
"""
probes_results = {}
probes_results_table = junos_views.junos_rpm_probes_results_table(self.device)
probes_results_table.get()
probes_results_items = probes_results_table.items()
for probe_result in probes_results_items:
probe_name = py23_compat.text_type(probe_result[0])
test_results = {
p[0]: p[1] for p in probe_result[1]
}
test_results['last_test_loss'] = napalm_base.helpers.convert(
int, test_results.pop('last_test_loss'), 0)
# assumes all float-valued fields are timings in useconds -- TODO confirm
for test_param_name, test_param_value in test_results.items():
if isinstance(test_param_value, float):
test_results[test_param_name] = test_param_value * 1e-3
# convert from useconds to mseconds
test_name = test_results.pop('test_name', '')
source = test_results.get('source', u'')
if source is None:
test_results['source'] = u''
if probe_name not in probes_results.keys():
probes_results[probe_name] = {}
probes_results[probe_name][test_name] = test_results
return probes_results
def traceroute(self,
destination,
source=C.TRACEROUTE_SOURCE,
ttl=C.TRACEROUTE_TTL,
timeout=C.TRACEROUTE_TIMEOUT,
vrf=C.TRACEROUTE_VRF):
"""Execute traceroute and return results.

:param destination: host or IP to trace to.
:param source: optional source address.
:param ttl: optional maximum number of hops.
:param timeout: optional per-probe wait, in seconds.
:param vrf: optional routing instance.
:return: {'success': {ttl: {'probes': {...}}}} or {'error': message}.
"""
traceroute_result = {}
# calling form RPC does not work properly :(
# but defined junos_route_instance_table just in case
source_str = ''
maxttl_str = ''
wait_str = ''
vrf_str = ''
if source:
source_str = ' source {source}'.format(source=source)
if ttl:
maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
if timeout:
wait_str = ' wait {timeout}'.format(timeout=timeout)
if vrf:
vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
destination=destination,
source=source_str,
maxttl=maxttl_str,
wait=wait_str,
vrf=vrf_str
)
traceroute_rpc = E('command', traceroute_command)
rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
# make direct RPC call via NETCONF
traceroute_results = rpc_reply.find('.//traceroute-results')
traceroute_failure = napalm_base.helpers.find_txt(
traceroute_results, 'traceroute-failure', '')
error_message = napalm_base.helpers.find_txt(
traceroute_results, 'rpc-error/error-message', '')
if traceroute_failure and error_message:
return {'error': '{}: {}'.format(traceroute_failure, error_message)}
traceroute_result['success'] = {}
for hop in traceroute_results.findall('hop'):
ttl_value = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
if ttl_value not in traceroute_result['success']:
traceroute_result['success'][ttl_value] = {'probes': {}}
for probe in hop.findall('probe-result'):
probe_index = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
ip_address = napalm_base.helpers.convert(
napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
host_name = py23_compat.text_type(
napalm_base.helpers.find_txt(probe, 'host-name', '*'))
# rtt is reported in useconds; convert to milliseconds
rtt = napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3 # ms
traceroute_result['success'][ttl_value]['probes'][probe_index] = {
'ip_address': ip_address,
'host_name': host_name,
'rtt': rtt
}
return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
         timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
    """Execute ping on the device and return the parsed results.

    :param destination: host or IP to ping.
    :param source: optional source address.
    :param ttl: optional max number of hops.
    :param timeout: optional per-probe wait, in seconds.
    :param size: optional ICMP payload size, in bytes.
    :param count: optional number of probes to send.
    :param vrf: optional routing instance.
    :return: {'success': {...}} with rtt stats in ms, or {'error': message}.
    """
    ping_dict = {}
    source_str = ''
    maxttl_str = ''
    timeout_str = ''
    size_str = ''
    count_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        timeout_str = ' wait {timeout}'.format(timeout=timeout)
    if size:
        size_str = ' size {size}'.format(size=size)
    if count:
        count_str = ' count {count}'.format(count=count)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
        destination=destination,
        source=source_str,
        ttl=maxttl_str,
        timeout=timeout_str,
        size=size_str,
        count=count_str,
        vrf=vrf_str
    )
    ping_rpc = E('command', ping_command)
    rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    probe_summary = rpc_reply.find('.//probe-results-summary')
    if probe_summary is None:
        rpc_error = rpc_reply.find('.//rpc-error')
        return {'error': '{}'.format(
            napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
    packet_loss = napalm_base.helpers.convert(
        int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
    # rtt values are valid only if we get an ICMP reply.
    # Use ``!=``: the previous ``is not 100`` compared object identity,
    # relying on CPython's small-int cache (and a SyntaxWarning on 3.8+).
    if packet_loss != 100:
        ping_dict['success'] = {}
        ping_dict['success']['probes_sent'] = int(
            probe_summary.findtext("probes-sent"))
        ping_dict['success']['packet_loss'] = packet_loss
        # device reports rtt in useconds; convert to ms
        ping_dict['success'].update({
            'rtt_min': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
            'rtt_max': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
            'rtt_avg': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-average'), -1) * 1e-3), 3),
            'rtt_stddev': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
        })
        tmp = rpc_reply.find('.//ping-results')
        results_array = []
        for probe_result in tmp.findall('probe-result'):
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip,
                napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
            rtt = round(
                (napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(
                        probe_result, 'rtt'), -1) * 1e-3), 3)
            results_array.append({'ip_address': ip_address,
                                  'rtt': rtt})
        ping_dict['success'].update({'results': results_array})
    else:
        return {'error': 'Packet loss {}'.format(packet_loss)}
    return ping_dict
def get_users(self):
"""Return the configuration of the users.

Maps each username to its privilege level (translated from the JunOS
login class), hashed password and configured SSH keys.
"""
users = {}
# translate JunOS login classes to Cisco-style privilege levels
_JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = {
'super-user': 15,
'superuser': 15,
'operator': 5,
'read-only': 1,
'unauthorized': 0
}
_DEFAULT_USER_DETAILS = {
'level': 0,
'password': '',
'sshkeys': []
}
users_table = junos_views.junos_users_table(self.device)
users_table.get()
users_items = users_table.items()
for user_entry in users_items:
username = user_entry[0]
user_details = _DEFAULT_USER_DETAILS.copy()
user_details.update({
d[0]: d[1] for d in user_entry[1] if d[1]
})
user_class = user_details.pop('class', '')
user_details = {
key: py23_compat.text_type(user_details[key])
for key in user_details.keys()
}
level = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0)
user_details.update({
'level': level
})
# collect whichever key types are configured into one list
user_details['sshkeys'] = [
user_details.pop(key)
for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
if user_details.get(key, '')
]
users[username] = user_details
return users
def get_optics(self):
"""Return optics information.

Merges the per-interface optics table (10G, lane-less) with the
per-lane 40G/100G table into the openconfig-style structure.
"""
optics_table = junos_views.junos_intf_optics_table(self.device)
optics_table.get()
optics_items = optics_table.items()
# optics_items has no lane information, so we need to re-format data
# inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
# but the information for 40G/100G is incorrect at this point
# Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
optics_items_with_lane = []
for intf_optic_item in optics_items:
temp_list = list(intf_optic_item)
temp_list.insert(1, u"0")
new_intf_optic_item = tuple(temp_list)
optics_items_with_lane.append(new_intf_optic_item)
# Now optics_items_with_lane has all optics with lane 0 included
# Example: ('xe-0/0/0', u'0', [ optical_values ])
# Get optical information for 40G/100G optics
optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
optics_table40G.get()
optics_40Gitems = optics_table40G.items()
# Re-format data as before inserting lane value
new_optics_40Gitems = []
for item in optics_40Gitems:
lane = item[0]
iface = item[1].pop(0)
new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
# New_optics_40Gitems contains 40G/100G optics only:
# ('et-0/0/49', u'0', [ optical_values ]),
# ('et-0/0/49', u'1', [ optical_values ]),
# ('et-0/0/49', u'2', [ optical_values ])
# Remove 40G/100G optics entries with wrong information returned
# from junos_intf_optics_table()
iface_40G = [item[0] for item in new_optics_40Gitems]
for intf_optic_item in optics_items_with_lane:
iface_name = intf_optic_item[0]
if iface_name not in iface_40G:
new_optics_40Gitems.append(intf_optic_item)
# New_optics_40Gitems contains all optics 10G/40G/100G with the lane
optics_detail = {}
for intf_optic_item in new_optics_40Gitems:
lane = intf_optic_item[1]
interface_name = py23_compat.text_type(intf_optic_item[0])
optics = dict(intf_optic_item[2])
if interface_name not in optics_detail:
optics_detail[interface_name] = {}
optics_detail[interface_name]['physical_channels'] = {}
optics_detail[interface_name]['physical_channels']['channel'] = []
# Defaulting avg, min, max values to 0.0 since device does not
# return these values
intf_optics = {
'index': int(lane),
'state': {
'input_power': {
'instant': (
float(optics['input_power'])
if optics['input_power'] not in
[None, C.OPTICS_NULL_LEVEL]
else 0.0),
'avg': 0.0,
'max': 0.0,
'min': 0.0
},
'output_power': {
'instant': (
float(optics['output_power'])
if optics['output_power'] not in
[None, C.OPTICS_NULL_LEVEL]
else 0.0),
'avg': 0.0,
'max': 0.0,
'min': 0.0
},
'laser_bias_current': {
'instant': (
float(optics['laser_bias_current'])
if optics['laser_bias_current'] not in
[None, C.OPTICS_NULL_LEVEL]
else 0.0),
'avg': 0.0,
'max': 0.0,
'min': 0.0
}
}
}
optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
return optics_detail
def get_config(self, retrieve='all'):
"""Return the device configuration.

:param retrieve: which datastore(s) to fetch: 'running', 'candidate' or
'all'. 'startup' is always empty -- JunOS has no separate startup config.
"""
rv = {
'startup': '',
'running': '',
'candidate': ''
}
options = {
'format': 'text',
'database': 'candidate'
}
if retrieve in ('candidate', 'all'):
config = self.device.rpc.get_config(filter_xml=None, options=options)
rv['candidate'] = py23_compat.text_type(config.text)
if retrieve in ('running', 'all'):
# reuse the same options, pointed at the committed datastore
options['database'] = 'committed'
config = self.device.rpc.get_config(filter_xml=None, options=options)
rv['running'] = py23_compat.text_type(config.text)
return rv
def get_network_instances(self, name=''):
"""Return the configured routing instances (VRFs).

:param name: optional instance name; when given, return only that
instance (or {} if it does not exist).
"""
network_instances = {}
ri_table = junos_views.junos_nw_instances_table(self.device)
ri_table.get()
ri_entries = ri_table.items()
vrf_interfaces = []
for ri_entry in ri_entries:
ri_name = py23_compat.text_type(ri_entry[0])
ri_details = {
d[0]: d[1] for d in ri_entry[1]
}
ri_type = ri_details['instance_type']
if ri_type is None:
ri_type = 'default'
ri_rd = ri_details['route_distinguisher']
ri_interfaces = ri_details['interfaces']
if not isinstance(ri_interfaces, list):
ri_interfaces = [ri_interfaces]
network_instances[ri_name] = {
'name': ri_name,
'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type), # default: return raw
'state': {
'route_distinguisher': ri_rd if ri_rd else ''
},
'interfaces': {
'interface': {
intrf_name: {} for intrf_name in ri_interfaces if intrf_name
}
}
}
vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
# interfaces not bound to any instance belong to the default instance
all_interfaces = self.get_interfaces().keys()
default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
if 'default' not in network_instances:
network_instances['default'] = {
'name': 'default',
'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
'state': {
'route_distinguisher': ''
},
'interfaces': {
'interface': {
py23_compat.text_type(intrf_name): {}
for intrf_name in default_interfaces
}
}
}
if not name:
return network_instances
if name not in network_instances:
return {}
return {name: network_instances[name]}
|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver.get_interfaces | python | def get_interfaces(self):
result = {}
interfaces = junos_views.junos_iface_table(self.device)
interfaces.get()
# convert all the tuples to our pre-defined dict structure
for iface in interfaces.keys():
result[iface] = {
'is_up': interfaces[iface]['is_up'],
'is_enabled': interfaces[iface]['is_enabled'],
'description': (interfaces[iface]['description'] or u''),
'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
'mac_address': napalm_base.helpers.convert(
napalm_base.helpers.mac,
interfaces[iface]['mac_address'],
py23_compat.text_type(interfaces[iface]['mac_address'])),
'speed': -1
}
# result[iface]['last_flapped'] = float(result[iface]['last_flapped'])
match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
if match is None:
continue
speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
if speed_value == -1:
continue
speed_unit = match.group(2)
if speed_unit.lower() == 'gbps':
speed_value *= 1000
result[iface]['speed'] = speed_value
return result | Return interfaces details. | train | https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L276-L309 | null | class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
"""
Initialise JunOS driver.

Optional args:
* config_lock (True/False): lock configuration DB after the connection is established.
* port (int): custom port
* key_file (string): SSH key file path
* keepalive (int): Keepalive interval
* ignore_warning (boolean): not generate warning exceptions
"""
self.hostname = hostname
self.username = username
self.password = password
self.timeout = timeout
self.config_replace = False
self.locked = False
# Get optional arguments
if optional_args is None:
optional_args = {}
self.config_lock = optional_args.get('config_lock', False)
self.port = optional_args.get('port', 22)
self.key_file = optional_args.get('key_file', None)
self.keepalive = optional_args.get('keepalive', 30)
self.ssh_config_file = optional_args.get('ssh_config_file', None)
self.ignore_warning = optional_args.get('ignore_warning', False)
# key-based auth takes precedence when a key file is supplied
if self.key_file:
self.device = Device(hostname,
user=username,
password=password,
ssh_private_key_file=self.key_file,
ssh_config=self.ssh_config_file,
port=self.port)
else:
self.device = Device(hostname,
user=username,
password=password,
port=self.port,
ssh_config=self.ssh_config_file)
self.profile = ["junos"]
def open(self):
    """Open the connection with the device."""
    try:
        self.device.open()
    except ConnectTimeoutError as cte:
        # Python 3 exceptions have no ``.message`` attribute; referencing it
        # raised AttributeError instead of the intended ConnectionException.
        raise ConnectionException(py23_compat.text_type(cte))
    self.device.timeout = self.timeout
    self.device._conn._session.transport.set_keepalive(self.keepalive)
    if hasattr(self.device, "cu"):
        # make sure to remove the cu attr from previous session
        # ValueError: requested attribute name cu already exists
        del self.device.cu
    self.device.bind(cu=Config)
    if self.config_lock:
        self._lock()
def close(self):
"""Close the connection."""
# release the configuration lock (if held) before tearing down NETCONF
if self.config_lock:
self._unlock()
self.device.close()
def _lock(self):
"""Lock the config DB (no-op if already locked by this session)."""
if not self.locked:
self.device.cu.lock()
self.locked = True
def _unlock(self):
"""Unlock the config DB (no-op if not locked by this session)."""
if self.locked:
self.device.cu.unlock()
self.locked = False
def _rpc(self, get, child=None, **kwargs):
"""
This allows you to construct an arbitrary RPC call to retreive common stuff. For example:

Configuration: get: "<get-configuration/>"
Interface information: get: "<get-interface-information/>"
A particular interfacece information:
get: "<get-interface-information/>"
child: "<interface-name>ge-0/0/0</interface-name>"

:param get: XML string of the RPC to invoke.
:param child: optional XML string appended as a child element of the RPC.
:return: the RPC reply serialised back to an XML string.
"""
rpc = etree.fromstring(get)
if child:
rpc.append(etree.fromstring(child))
response = self.device.execute(rpc)
return etree.tostring(response)
def is_alive(self):
"""Return whether both the SSH transport and the NETCONF session are up."""
# evaluate the state of the underlying SSH connection
# and also the NETCONF status from PyEZ
return {
'is_alive': self.device._conn._session.transport.is_active() and self.device.connected
}
@staticmethod
def _is_json_format(config):
    """Return True when *config* parses as JSON, False otherwise."""
    try:
        json.loads(config)
    except (TypeError, ValueError):
        return False
    else:
        return True
def _detect_config_format(self, config):
"""Guess the format of *config*: 'xml', 'set', 'json' or 'text'."""
fmt = 'text'
# first words that identify set-style (one-command-per-line) configs
set_action_matches = [
'set',
'activate',
'deactivate',
'annotate',
'copy',
'delete',
'insert',
'protect',
'rename',
'unprotect',
]
if config.strip().startswith('<'):
return 'xml'
elif config.strip().split(' ')[0] in set_action_matches:
return 'set'
elif self._is_json_format(config):
return 'json'
return fmt
def _load_candidate(self, filename, config, overwrite):
"""Load a configuration into the candidate DB.

:param filename: path to a config file, or None to use *config*.
:param config: config text (ignored when *filename* is given).
:param overwrite: True to replace the whole config, False to merge.
:raises ReplaceConfigException / MergeConfigException: on load errors.
"""
if filename is None:
configuration = config
else:
with open(filename) as f:
configuration = f.read()
if not self.config_lock:
# if not locked during connection time
# will try to lock it if not already aquired
self._lock()
# and the device will be locked till first commit/rollback
try:
fmt = self._detect_config_format(configuration)
if fmt == "xml":
configuration = etree.XML(configuration)
self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
ignore_warning=self.ignore_warning)
except ConfigLoadError as e:
# surface the error under the exception type matching the operation
if self.config_replace:
raise ReplaceConfigException(e.errs)
else:
raise MergeConfigException(e.errs)
def load_replace_candidate(self, filename=None, config=None):
"""Open the candidate config and replace."""
self.config_replace = True
self._load_candidate(filename, config, True)
def load_merge_candidate(self, filename=None, config=None):
"""Open the candidate config and merge."""
self.config_replace = False
self._load_candidate(filename, config, False)
def compare_config(self):
    """Compare candidate config with running."""
    # Config.diff() returns None when there are no pending changes
    pending_diff = self.device.cu.diff()
    return '' if pending_diff is None else pending_diff.strip()
def commit_config(self):
"""Commit configuration."""
self.device.cu.commit(ignore_warning=self.ignore_warning)
# a session-scoped lock (config_lock) is kept; otherwise release now
if not self.config_lock:
self._unlock()
def discard_config(self):
"""Discard changes (rollback 0)."""
# rollback 0 == drop the candidate, keeping the running config
self.device.cu.rollback(rb_id=0)
if not self.config_lock:
self._unlock()
def rollback(self):
"""Rollback to previous commit."""
# load rollback 1 into the candidate, then commit it
self.device.cu.rollback(rb_id=1)
self.commit_config()
def get_facts(self):
"""Return facts of the device."""
output = self.device.facts
# uptime may be unavailable (e.g. insufficient privileges) -> -1
uptime = self.device.uptime or -1
interfaces = junos_views.junos_iface_table(self.device)
interfaces.get()
interface_list = interfaces.keys()
return {
'vendor': u'Juniper',
'model': py23_compat.text_type(output['model']),
'serial_number': py23_compat.text_type(output['serialnumber']),
'os_version': py23_compat.text_type(output['version']),
'hostname': py23_compat.text_type(output['hostname']),
'fqdn': py23_compat.text_type(output['fqdn']),
'uptime': uptime,
'interface_list': interface_list
}
def get_interfaces_counters(self):
    """Return interfaces counters."""
    counter_table = junos_views.junos_iface_counter_table(self.device)
    counter_table.get()
    # missing counters are normalised to -1
    result = {}
    for iface_name, iface_counters in counter_table.items():
        result[iface_name] = {
            counter_name: (counter_value if counter_value is not None else -1)
            for counter_name, counter_value in iface_counters
        }
    return result
def get_environment(self):
"""Return environment details.

Collects power, fan, temperature, CPU and memory information from the
chassis environment, routing engine and PEM tables.
"""
environment = junos_views.junos_enviroment_table(self.device)
routing_engine = junos_views.junos_routing_engine_table(self.device)
temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
power_supplies = junos_views.junos_pem_table(self.device)
environment.get()
routing_engine.get()
temperature_thresholds.get()
environment_data = {}
current_class = None
for sensor_object, object_data in environment.items():
structured_object_data = {k: v for k, v in object_data}
if structured_object_data['class']:
# If current object has a 'class' defined, store it for use
# on subsequent unlabeled lines.
current_class = structured_object_data['class']
else:
# Juniper doesn't label the 2nd+ lines of a given class with a
# class name. In that case, we use the most recent class seen.
structured_object_data['class'] = current_class
if structured_object_data['class'] == 'Power':
# Create a dict for the 'power' key
try:
environment_data['power'][sensor_object] = {}
except KeyError:
environment_data['power'] = {}
environment_data['power'][sensor_object] = {}
environment_data['power'][sensor_object]['capacity'] = -1.0
environment_data['power'][sensor_object]['output'] = -1.0
if structured_object_data['class'] == 'Fans':
# Create a dict for the 'fans' key
try:
environment_data['fans'][sensor_object] = {}
except KeyError:
environment_data['fans'] = {}
environment_data['fans'][sensor_object] = {}
status = structured_object_data['status']
env_class = structured_object_data['class']
if (status == 'OK' and env_class == 'Power'):
# If status is Failed, Absent or Testing, set status to False.
environment_data['power'][sensor_object]['status'] = True
elif (status != 'OK' and env_class == 'Power'):
environment_data['power'][sensor_object]['status'] = False
elif (status == 'OK' and env_class == 'Fans'):
# If status is Failed, Absent or Testing, set status to False.
environment_data['fans'][sensor_object]['status'] = True
elif (status != 'OK' and env_class == 'Fans'):
environment_data['fans'][sensor_object]['status'] = False
# NOTE(review): this loop reads ``structured_object_data`` and
# ``sensor_object`` left over from the loop above (their values after
# the last iteration), not per-threshold data -- looks like a bug;
# confirm intended behavior before changing.
for temperature_object, temperature_data in temperature_thresholds.items():
structured_temperature_data = {k: v for k, v in temperature_data}
if structured_object_data['class'] == 'Temp':
# Create a dict for the 'temperature' key
try:
environment_data['temperature'][sensor_object] = {}
except KeyError:
environment_data['temperature'] = {}
environment_data['temperature'][sensor_object] = {}
# Check we have a temperature field in this class (See #66)
if structured_object_data['temperature']:
environment_data['temperature'][sensor_object]['temperature'] = \
float(structured_object_data['temperature'])
# Set a default value (False) to the key is_critical and is_alert
environment_data['temperature'][sensor_object]['is_alert'] = False
environment_data['temperature'][sensor_object]['is_critical'] = False
# Check if the working temperature is equal to or higher than alerting threshold
temp = structured_object_data['temperature']
if structured_temperature_data['red-alarm'] <= temp:
environment_data['temperature'][sensor_object]['is_critical'] = True
environment_data['temperature'][sensor_object]['is_alert'] = True
elif structured_temperature_data['yellow-alarm'] <= temp:
environment_data['temperature'][sensor_object]['is_alert'] = True
# Try to correct Power Supply information
pem_table = dict()
try:
power_supplies.get()
except RpcError:
# Not all platforms have support for this
pass
else:
# Format PEM information and correct capacity and output values
for pem in power_supplies.items():
pem_name = pem[0].replace("PEM", "Power Supply")
pem_table[pem_name] = dict(pem[1])
environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
for routing_engine_object, routing_engine_data in routing_engine.items():
structured_routing_engine_data = {k: v for k, v in routing_engine_data}
# Create dicts for 'cpu' and 'memory'.
try:
environment_data['cpu'][routing_engine_object] = {}
environment_data['memory'] = {}
except KeyError:
environment_data['cpu'] = {}
environment_data['cpu'][routing_engine_object] = {}
environment_data['memory'] = {}
# Calculate the CPU usage by using the CPU idle value.
environment_data['cpu'][routing_engine_object]['%usage'] = \
100.0 - structured_routing_engine_data['cpu-idle']
try:
environment_data['memory']['available_ram'] = \
int(structured_routing_engine_data['memory-dram-size'])
except ValueError:
environment_data['memory']['available_ram'] = \
int(
''.join(
i for i in structured_routing_engine_data['memory-dram-size']
if i.isdigit()
)
)
# Junos gives us RAM in %, so calculation has to be made.
# Sadly, because of this, results are not 100% accurate to the truth.
environment_data['memory']['used_ram'] = \
int(round(environment_data['memory']['available_ram'] / 100.0 *
structured_routing_engine_data['memory-buffer-utilization']))
return environment_data
@staticmethod
def _get_address_family(table):
    """
    Function to derive address family from a junos table name.

    :params table: The name of the routing table
    :returns: address family
    """
    known_families = {
        'inet': 'ipv4',
        'inet6': 'ipv6',
        'inetflow': 'flow'
    }
    # the family is the second-to-last dot-separated component
    family_key = table.split('.')[-2]
    # unknown families fall through unchanged
    return known_families.get(family_key, family_key)
def _parse_route_stats(self, neighbor):
data = {
'ipv4': {
'received_prefixes': -1,
'accepted_prefixes': -1,
'sent_prefixes': -1
},
'ipv6': {
'received_prefixes': -1,
'accepted_prefixes': -1,
'sent_prefixes': -1
}
}
if not neighbor['is_up']:
return data
elif isinstance(neighbor['tables'], list):
if isinstance(neighbor['sent_prefixes'], int):
# We expect sent_prefixes to be a list, but sometimes it
# is of type int. Therefore convert attribute to list
neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
for idx, table in enumerate(neighbor['tables']):
family = self._get_address_family(table)
data[family] = {}
data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
if 'in sync' in neighbor['send-state'][idx]:
data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
else:
data[family]['sent_prefixes'] = 0
else:
family = self._get_address_family(neighbor['tables'])
data[family] = {}
data[family]['received_prefixes'] = neighbor['received_prefixes']
data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
data[family]['sent_prefixes'] = neighbor['sent_prefixes']
return data
@staticmethod
def _parse_value(value):
if isinstance(value, py23_compat.string_types):
return py23_compat.text_type(value)
elif value is None:
return u''
else:
return value
    def get_bgp_neighbors(self):
        """Return BGP neighbors details.

        The result is keyed by routing-instance name ('global' for the
        master instance); each entry holds the instance 'router_id' and a
        'peers' dict keyed by neighbor IP.
        """
        bgp_neighbor_data = {}
        # Skeleton of the per-neighbor dict; guarantees every expected key
        # exists even when the RPC reply omits some fields.
        default_neighbor_details = {
            'local_as': 0,
            'remote_as': 0,
            'remote_id': '',
            'is_up': False,
            'is_enabled': False,
            'description': '',
            'uptime': 0,
            'address_family': {}
        }
        keys = default_neighbor_details.keys()
        uptime_table = junos_views.junos_bgp_uptime_table(self.device)
        bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
        # Per-instance cache of uptime RPC results, so each routing
        # instance is queried at most once.
        uptime_table_lookup = {}
        def _get_uptime_table(instance):
            # Memoised fetch of the BGP uptime table for one instance.
            if instance not in uptime_table_lookup:
                uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
            return uptime_table_lookup[instance]
        def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
            '''
            Make sure to execute a simple request whenever using
            junos > 13. This is a helper used to avoid code redundancy
            and reuse the function also when iterating through the list
            BGP neighbors under a specific routing instance,
            also when the device is capable to return the routing
            instance name at the BGP neighbor level.
            '''
            for bgp_neighbor in neighbor_data:
                # The neighbor key looks like 'a.b.c.d+port'; keep the IP only.
                peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
                neighbor_details = deepcopy(default_neighbor_details)
                neighbor_details.update(
                    {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
                )
                if not instance:
                    # not instance, means newer Junos version,
                    # as we request everything in a single request
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                    instance = peer_fwd_rti
                else:
                    # instance is explicitly requests,
                    # thus it's an old Junos, so we retrieve the BGP neighbors
                    # under a certain routing instance
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
                instance_name = 'global' if instance == 'master' else instance
                if instance_name not in bgp_neighbor_data:
                    bgp_neighbor_data[instance_name] = {}
                if 'router_id' not in bgp_neighbor_data[instance_name]:
                    # we only need to set this once
                    bgp_neighbor_data[instance_name]['router_id'] = \
                        py23_compat.text_type(neighbor_details.get('local_id', ''))
                # Keep only the fields the NAPALM model defines.
                peer = {
                    key: self._parse_value(value)
                    for key, value in neighbor_details.items()
                    if key in keys
                }
                peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
                peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
                peer['address_family'] = self._parse_route_stats(neighbor_details)
                if 'peers' not in bgp_neighbor_data[instance_name]:
                    bgp_neighbor_data[instance_name]['peers'] = {}
                bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
                if not uptime_table_items:
                    uptime_table_items = _get_uptime_table(instance)
                for neighbor, uptime in uptime_table_items:
                    if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                        bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                    bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]
        # Commenting out the following sections, till Junos
        # will provide a way to identify the routing instance name
        # from the details of the BGP neighbor
        # currently, there are Junos 15 version having a field called `peer_fwd_rti`
        # but unfortunately, this is not consistent.
        # Junos 17 might have this fixed, but this needs to be revisited later.
        # In the definition below, `old_junos` means a version that does not provide
        # the forwarding RTI information.
        #
        # old_junos = napalm_base.helpers.convert(
        #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
        # if old_junos:
        instances = junos_views.junos_route_instance_table(self.device).get()
        for instance, instance_data in instances.items():
            if instance.startswith('__'):
                # junos internal instances
                continue
            bgp_neighbor_data[instance] = {'peers': {}}
            instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
            uptime_table_items = uptime_table.get(instance=instance).items()
            _get_bgp_neighbors_core(instance_neighbors,
                                    instance=instance,
                                    uptime_table_items=uptime_table_items)
        # If the OS provides the `peer_fwd_rti` or any way to identify the
        # rotuing instance name (see above), the performances of this getter
        # can be significantly improved, as we won't execute one request
        # for each an every RT.
        # However, this improvement would only be beneficial for multi-VRF envs.
        #
        # else:
        #     instance_neighbors = bgp_neighbors_table.get().items()
        #     _get_bgp_neighbors_core(instance_neighbors)
        bgp_tmp_dict = {}
        # Drop instances that ended up with no peers at all.
        for k, v in bgp_neighbor_data.items():
            if bgp_neighbor_data[k]['peers']:
                bgp_tmp_dict[k] = v
        return bgp_tmp_dict
def get_lldp_neighbors(self):
"""Return LLDP neighbors details."""
lldp = junos_views.junos_lldp_table(self.device)
try:
lldp.get()
except RpcError as rpcerr:
# this assumes the library runs in an environment
# able to handle logs
# otherwise, the user just won't see this happening
log.error('Unable to retrieve the LLDP neighbors information:')
log.error(rpcerr.message)
return {}
result = lldp.items()
neighbors = {}
for neigh in result:
if neigh[0] not in neighbors.keys():
neighbors[neigh[0]] = []
neighbors[neigh[0]].append({x[0]: py23_compat.text_type(x[1]) for x in neigh[1]})
return neighbors
def get_lldp_neighbors_detail(self, interface=''):
"""Detailed view of the LLDP neighbors."""
lldp_neighbors = {}
lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
try:
lldp_table.get()
except RpcError as rpcerr:
# this assumes the library runs in an environment
# able to handle logs
# otherwise, the user just won't see this happening
log.error('Unable to retrieve the LLDP neighbors information:')
log.error(rpcerr.message)
return {}
interfaces = lldp_table.get().keys()
# get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
# and SRX Series is get-lldp-interface-neighbors-information,
# and rpc for M, MX, and T Series is get-lldp-interface-neighbors
# ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
# ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
for interface in interfaces:
if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
lldp_table.get(interface_name=interface)
else:
lldp_table.get(interface_device=interface)
for item in lldp_table:
if interface not in lldp_neighbors.keys():
lldp_neighbors[interface] = []
lldp_neighbors[interface].append({
'parent_interface': item.parent_interface,
'remote_port': item.remote_port,
'remote_chassis_id': napalm_base.helpers.convert(
napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
'remote_port_description': napalm_base.helpers.convert(
py23_compat.text_type, item.remote_port_description),
'remote_system_name': item.remote_system_name,
'remote_system_description': item.remote_system_description,
'remote_system_capab': item.remote_system_capab,
'remote_system_enable_capab': item.remote_system_enable_capab
})
return lldp_neighbors
    def cli(self, commands):
        """Execute raw CLI commands and return their output.

        :param commands: list of CLI command strings to run
        :returns: dict mapping each original command to its text output
        :raises TypeError: when ``commands`` is not a list

        Junos does not always pipe output server-side, so a subset of pipe
        operators (except/match/last/trim/count/find) is emulated locally
        on the returned text, in the fixed order defined by ``_OF_MAP``.
        """
        cli_output = {}
        def _count(txt, none): # Second arg for consistency only. noqa
            '''
            Return the exact output, as Junos displays
            e.g.:
            > show system processes extensive | match root | count
            Count: 113 lines
            '''
            count = len(txt.splitlines())
            return 'Count: {count} lines'.format(count=count)
        def _trim(txt, length):
            '''
            Trim specified number of columns from start of line.
            '''
            try:
                newlines = []
                for line in txt.splitlines():
                    newlines.append(line[int(length):])
                return '\n'.join(newlines)
            except ValueError:
                return txt
        def _except(txt, pattern):
            '''
            Show only text that does not match a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            unmatched = [
                line for line in txt.splitlines()
                if not re.search(rgx, line, re.I)
            ]
            return '\n'.join(unmatched)
        def _last(txt, length):
            '''
            Display end of output only.
            '''
            try:
                return '\n'.join(
                    txt.splitlines()[(-1)*int(length):]
                )
            except ValueError:
                return txt
        def _match(txt, pattern):
            '''
            Show only text that matches a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            matched = [
                line for line in txt.splitlines()
                if re.search(rgx, line, re.I)
            ]
            return '\n'.join(matched)
        def _find(txt, pattern):
            '''
            Search for first occurrence of pattern.
            '''
            rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
            match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
            if match:
                return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
            else:
                return '\nPattern not found'
        def _process_pipe(cmd, txt):
            '''
            Process CLI output from Juniper device that
            doesn't allow piping the output.
            '''
            if txt is not None:
                return txt
            # NOTE: the order of the keys below defines the order in which
            # the emulated pipe operators are applied.
            _OF_MAP = OrderedDict()
            _OF_MAP['except'] = _except
            _OF_MAP['match'] = _match
            _OF_MAP['last'] = _last
            _OF_MAP['trim'] = _trim
            _OF_MAP['count'] = _count
            _OF_MAP['find'] = _find
            # the operations order matter in this case!
            exploded_cmd = cmd.split('|')
            pipe_oper_args = {}
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0]  # always there
                pipe_args = ''.join(exploded_pipe[1:2])
                # will not throw error when there's no arg
                pipe_oper_args[pipe_oper] = pipe_args
            for oper in _OF_MAP.keys():
                # to make sure the operation sequence is correct
                if oper not in pipe_oper_args.keys():
                    continue
                txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
            return txt
        if not isinstance(commands, list):
            raise TypeError('Please enter a valid list of commands!')
        # 'save' would write files on the device, so it is stripped from
        # any piped command before execution.
        _PIPE_BLACKLIST = ['save']
        # Preprocessing to avoid forbidden commands
        for command in commands:
            exploded_cmd = command.split('|')
            command_safe_parts = []
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0]  # always there
                if pipe_oper in _PIPE_BLACKLIST:
                    continue
                pipe_args = ''.join(exploded_pipe[1:2])
                safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                                 args=pipe_args)
                command_safe_parts.append(safe_pipe)
            safe_command = exploded_cmd[0] if not command_safe_parts else\
                '{base} | {pipes}'.format(base=exploded_cmd[0],
                                          pipes=' | '.join(command_safe_parts))
            raw_txt = self.device.cli(safe_command, warning=False)
            cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
                _process_pipe(command, raw_txt))
        return cli_output
    def get_bgp_config(self, group='', neighbor=''):
        """Return BGP configuration.

        :param group: optional BGP group name to filter on
        :param neighbor: optional neighbor IP; only honoured together
            with ``group``
        :returns: dict keyed by group name, each with group-level fields
            plus a 'neighbors' dict keyed by peer IP
        """
        def update_dict(d, u): # for deep dictionary update
            for k, v in u.items():
                if isinstance(d, collections.Mapping):
                    if isinstance(v, collections.Mapping):
                        r = update_dict(d.get(k, {}), v)
                        d[k] = r
                    else:
                        d[k] = u[k]
                else:
                    d = {k: u[k]}
            return d
        def build_prefix_limit(**args):
            """
            Transform the elements of a dictionary into nested dictionaries.
            Example:
                {
                    'inet_unicast_limit': 500,
                    'inet_unicast_teardown_threshold': 95,
                    'inet_unicast_teardown_timeout': 5
                }
                becomes:
                {
                    'inet': {
                        'unicast': {
                            'limit': 500,
                            'teardown': {
                                'threshold': 95,
                                'timeout': 5
                            }
                        }
                    }
                }
            """
            prefix_limit = {}
            for key, value in args.items():
                key_levels = key.split('_')
                length = len(key_levels)-1
                temp_dict = {
                    key_levels[length]: value
                }
                for index in reversed(range(length)):
                    level = key_levels[index]
                    temp_dict = {level: temp_dict}
                update_dict(prefix_limit, temp_dict)
            return prefix_limit
        # Fields shared by both group-level and peer-level configuration,
        # mapped to the type they must be coerced to.
        _COMMON_FIELDS_DATATYPE_ = {
            'description': py23_compat.text_type,
            'local_address': py23_compat.text_type,
            'local_as': int,
            'remote_as': int,
            'import_policy': py23_compat.text_type,
            'export_policy': py23_compat.text_type,
            'inet_unicast_limit_prefix_limit': int,
            'inet_unicast_teardown_threshold_prefix_limit': int,
            'inet_unicast_teardown_timeout_prefix_limit': int,
            'inet_unicast_novalidate_prefix_limit': int,
            'inet_flow_limit_prefix_limit': int,
            'inet_flow_teardown_threshold_prefix_limit': int,
            'inet_flow_teardown_timeout_prefix_limit': int,
            'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
            'inet6_unicast_limit_prefix_limit': int,
            'inet6_unicast_teardown_threshold_prefix_limit': int,
            'inet6_unicast_teardown_timeout_prefix_limit': int,
            'inet6_unicast_novalidate_prefix_limit': int,
            'inet6_flow_limit_prefix_limit': int,
            'inet6_flow_teardown_threshold_prefix_limit': int,
            'inet6_flow_teardown_timeout_prefix_limit': int,
            'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
        }
        _PEER_FIELDS_DATATYPE_MAP_ = {
            'authentication_key': py23_compat.text_type,
            'route_reflector_client': bool,
            'nhs': bool
        }
        _PEER_FIELDS_DATATYPE_MAP_.update(
            _COMMON_FIELDS_DATATYPE_
        )
        _GROUP_FIELDS_DATATYPE_MAP_ = {
            'type': py23_compat.text_type,
            'apply_groups': list,
            'remove_private_as': bool,
            'multipath': bool,
            'multihop_ttl': int
        }
        _GROUP_FIELDS_DATATYPE_MAP_.update(
            _COMMON_FIELDS_DATATYPE_
        )
        # Neutral default per datatype, used when a field is missing.
        _DATATYPE_DEFAULT_ = {
            py23_compat.text_type: '',
            int: 0,
            bool: False,
            list: []
        }
        bgp_config = {}
        if group:
            bgp = junos_views.junos_bgp_config_group_table(self.device)
            bgp.get(group=group)
        else:
            bgp = junos_views.junos_bgp_config_table(self.device)
            bgp.get()
            neighbor = ''  # if no group is set, no neighbor should be set either
        bgp_items = bgp.items()
        if neighbor:
            neighbor_ip = napalm_base.helpers.ip(neighbor)
        for bgp_group in bgp_items:
            bgp_group_name = bgp_group[0]
            bgp_group_details = bgp_group[1]
            # Seed the group with defaults for every non-prefix-limit field.
            bgp_config[bgp_group_name] = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                if key == 'neighbors':
                    bgp_group_peers = value
                    continue
                bgp_config[bgp_group_name].update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            if 'multihop' in bgp_config[bgp_group_name].keys():
                # Delete 'multihop' key from the output
                del bgp_config[bgp_group_name]['multihop']
                if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                    # Set ttl to default value 64
                    bgp_config[bgp_group_name]['multihop_ttl'] = 64
            bgp_config[bgp_group_name]['neighbors'] = {}
            for bgp_group_neighbor in bgp_group_peers.items():
                bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
                if neighbor and bgp_peer_address != neighbor:
                    continue  # if filters applied, jump over all other neighbors
                bgp_group_details = bgp_group_neighbor[1]
                # Seed the peer with defaults for every non-prefix-limit field.
                bgp_peer_details = {
                    field: _DATATYPE_DEFAULT_.get(datatype)
                    for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                    if '_prefix_limit' not in field
                }
                for elem in bgp_group_details:
                    if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                        continue
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    key = elem[0]
                    value = elem[1]
                    if key in ['export_policy', 'import_policy']:
                        if isinstance(value, list):
                            value = ' '.join(value)
                    if key == 'local_address':
                        value = napalm_base.helpers.convert(
                            napalm_base.helpers.ip, value, value)
                    bgp_peer_details.update({
                        key: napalm_base.helpers.convert(datatype, value, default)
                    })
                bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['local_as'])
                bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['remote_as'])
                # NOTE(review): 'key' leaks from the loop above, so this only
                # fires when the *last* processed field is 'cluster' — verify.
                if key == 'cluster':
                    bgp_peer_details['route_reflector_client'] = True
                    # we do not want cluster in the output
                    del bgp_peer_details['cluster']
                if 'cluster' in bgp_config[bgp_group_name].keys():
                    bgp_peer_details['route_reflector_client'] = True
                prefix_limit_fields = {}
                for elem in bgp_group_details:
                    if '_prefix_limit' in elem[0] and elem[1] is not None:
                        datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                        default = _DATATYPE_DEFAULT_.get(datatype)
                        prefix_limit_fields.update({
                            elem[0].replace('_prefix_limit', ''):
                                napalm_base.helpers.convert(datatype, elem[1], default)
                        })
                bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
                bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
                if neighbor and bgp_peer_address == neighbor_ip:
                    break  # found the desired neighbor
            if 'cluster' in bgp_config[bgp_group_name].keys():
                # we do not want cluster in the output
                del bgp_config[bgp_group_name]['cluster']
        return bgp_config
    def get_bgp_neighbors_detail(self, neighbor_address=''):
        """Detailed view of the BGP neighbors operational data.

        :param neighbor_address: optional peer IP to filter on
        :returns: dict keyed by routing-instance name; each value maps a
            remote AS number to a list of neighbor detail dicts
        """
        bgp_neighbors = {}
        # Skeleton of the per-neighbor dict, so every expected key exists
        # even when the RPC reply omits some fields.
        default_neighbor_details = {
            'up': False,
            'local_as': 0,
            'remote_as': 0,
            'router_id': u'',
            'local_address': u'',
            'routing_table': u'',
            'local_address_configured': False,
            'local_port': 0,
            'remote_address': u'',
            'remote_port': 0,
            'multihop': False,
            'multipath': False,
            'remove_private_as': False,
            'import_policy': u'',
            'export_policy': u'',
            'input_messages': -1,
            'output_messages': -1,
            'input_updates': -1,
            'output_updates': -1,
            'messages_queued_out': -1,
            'connection_state': u'',
            'previous_connection_state': u'',
            'last_event': u'',
            'suppress_4byte_as': False,
            'local_as_prepend': False,
            'holdtime': 0,
            'configured_holdtime': 0,
            'keepalive': 0,
            'configured_keepalive': 0,
            'active_prefix_count': -1,
            'received_prefix_count': -1,
            'accepted_prefix_count': -1,
            'suppressed_prefix_count': -1,
            'advertised_prefix_count': -1,
            'flap_count': 0
        }
        # Maps tokens from the Junos 'options' string to boolean flags
        # in the NAPALM model.
        OPTION_KEY_MAP = {
            'RemovePrivateAS': 'remove_private_as',
            'Multipath': 'multipath',
            'Multihop': 'multihop',
            'AddressFamily': 'local_address_configured'
            # 'AuthKey' : 'authentication_key_set'
            # but other vendors do not specify if auth key is set
            # other options:
            # Preference, HoldTime, Ttl, LogUpDown, Refresh
        }
        def _bgp_iter_core(neighbor_data, instance=None):
            '''
            Iterate over a list of neighbors.
            For older junos, the routing instance is not specified inside the
            BGP neighbors XML, therefore we need to use a super sub-optimal structure
            as in get_bgp_neighbors: iterate through the list of network instances
            then execute one request for each and every routing instance.
            For newer junos, this is not necessary as the routing instance is available
            and we can get everything solve in a single request.
            '''
            for bgp_neighbor in neighbor_data:
                remote_as = int(bgp_neighbor[0])
                neighbor_details = deepcopy(default_neighbor_details)
                neighbor_details.update(
                    {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
                )
                if not instance:
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                    instance = peer_fwd_rti
                else:
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
                instance_name = 'global' if instance == 'master' else instance
                options = neighbor_details.pop('options', '')
                if isinstance(options, str):
                    options_list = options.split()
                    for option in options_list:
                        key = OPTION_KEY_MAP.get(option)
                        if key is not None:
                            neighbor_details[key] = True
                four_byte_as = neighbor_details.pop('4byte_as', 0)
                # 'local_address' is 'ip+port'; split it into the two fields.
                local_address = neighbor_details.pop('local_address', '')
                local_details = local_address.split('+')
                neighbor_details['local_address'] = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, local_details[0], local_details[0])
                if len(local_details) == 2:
                    neighbor_details['local_port'] = int(local_details[1])
                else:
                    neighbor_details['local_port'] = 179
                neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
                peer_address = neighbor_details.pop('peer_address', '')
                remote_details = peer_address.split('+')
                neighbor_details['remote_address'] = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, remote_details[0], remote_details[0])
                if len(remote_details) == 2:
                    neighbor_details['remote_port'] = int(remote_details[1])
                else:
                    neighbor_details['remote_port'] = 179
                neighbor_details['routing_table'] = instance_name
                neighbor_details['local_as'] = napalm_base.helpers.as_number(
                    neighbor_details['local_as'])
                neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                    neighbor_details['remote_as'])
                neighbors_rib = neighbor_details.pop('rib')
                neighbors_queue = neighbor_details.pop('queue')
                messages_queued_out = 0
                for queue_entry in neighbors_queue.items():
                    messages_queued_out += queue_entry[1][0][1]
                neighbor_details['messages_queued_out'] = messages_queued_out
                if instance_name not in bgp_neighbors.keys():
                    bgp_neighbors[instance_name] = {}
                if remote_as not in bgp_neighbors[instance_name].keys():
                    bgp_neighbors[instance_name][remote_as] = []
                neighbor_rib_stats = neighbors_rib.items()
                if not neighbor_rib_stats:
                    bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                    continue  # no RIBs available, pass default details
                # Sum the prefix counters across all RIBs of this neighbor.
                neighbor_rib_details = {
                    'active_prefix_count': 0,
                    'received_prefix_count': 0,
                    'accepted_prefix_count': 0,
                    'suppressed_prefix_count': 0,
                    'advertised_prefix_count': 0
                }
                for rib_entry in neighbor_rib_stats:
                    for elem in rib_entry[1]:
                        if elem[1] is None:
                            neighbor_rib_details[elem[0]] += 0
                        else:
                            neighbor_rib_details[elem[0]] += elem[1]
                neighbor_details.update(neighbor_rib_details)
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)
        # old_junos = napalm_base.helpers.convert(
        #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
        bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
        # if old_junos:
        instances = junos_views.junos_route_instance_table(self.device)
        for instance, instance_data in instances.get().items():
            if instance.startswith('__'):
                # junos internal instances
                continue
            neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                    neighbor_address=str(neighbor_address)).items()
            _bgp_iter_core(neighbor_data, instance=instance)
        # else:
        #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
        #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
        #     _bgp_iter_core(neighbor_data)
        return bgp_neighbors
def get_arp_table(self):
"""Return the ARP table."""
# could use ArpTable
# from jnpr.junos.op.phyport import ArpTable
# and simply use it
# but
# we need:
# - filters
# - group by VLAN ID
# - hostname & TTE fields as well
arp_table = []
arp_table_raw = junos_views.junos_arp_table(self.device)
arp_table_raw.get()
arp_table_items = arp_table_raw.items()
for arp_table_entry in arp_table_items:
arp_entry = {
elem[0]: elem[1] for elem in arp_table_entry[1]
}
arp_entry['mac'] = napalm_base.helpers.mac(arp_entry.get('mac'))
arp_entry['ip'] = napalm_base.helpers.ip(arp_entry.get('ip'))
arp_table.append(arp_entry)
return arp_table
def get_ntp_peers(self):
"""Return the NTP peers configured on the device."""
ntp_table = junos_views.junos_ntp_peers_config_table(self.device)
ntp_table.get()
ntp_peers = ntp_table.items()
if not ntp_peers:
return {}
return {napalm_base.helpers.ip(peer[0]): {} for peer in ntp_peers}
def get_ntp_servers(self):
"""Return the NTP servers configured on the device."""
ntp_table = junos_views.junos_ntp_servers_config_table(self.device)
ntp_table.get()
ntp_servers = ntp_table.items()
if not ntp_servers:
return {}
return {napalm_base.helpers.ip(server[0]): {} for server in ntp_servers}
    def get_ntp_stats(self):
        """Return NTP stats (associations).

        Parses the text output of 'show ntp associations no-resolve',
        since Junos exposes no XML RPC for NTP associations.
        """
        # NTP Peers does not have XML RPC defined
        # thus we need to retrieve raw text and parse...
        # :(
        ntp_stats = []
        # Matches one association line: optional tally mark (+ * x -),
        # remote, refid, stratum, type, when, poll, reach, delay, offset,
        # jitter.
        REGEX = (
            '^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
            '\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
            '\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
            '\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
            '\s+([0-9\.]+)\s?$'
        )
        ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
        ntp_assoc_output_lines = ntp_assoc_output.splitlines()
        for ntp_assoc_output_line in ntp_assoc_output_lines[3:]:  # skip the three header lines
            line_search = re.search(REGEX, ntp_assoc_output_line, re.I)
            if not line_search:
                continue  # pattern not found
            line_groups = line_search.groups()
            try:
                ntp_stats.append({
                    'remote': napalm_base.helpers.ip(line_groups[1]),
                    'synchronized': (line_groups[0] == '*'),
                    'referenceid': py23_compat.text_type(line_groups[2]),
                    'stratum': int(line_groups[3]),
                    'type': py23_compat.text_type(line_groups[4]),
                    'when': py23_compat.text_type(line_groups[5]),
                    'hostpoll': int(line_groups[6]),
                    'reachability': int(line_groups[7]),
                    'delay': float(line_groups[8]),
                    'offset': float(line_groups[9]),
                    'jitter': float(line_groups[10])
                })
            except Exception:
                # a line that matched but holds unparsable values is skipped
                continue  # jump to next line
        return ntp_stats
def get_interfaces_ip(self):
"""Return the configured IP addresses."""
interfaces_ip = {}
interface_table = junos_views.junos_ip_interfaces_table(self.device)
interface_table.get()
interface_table_items = interface_table.items()
_FAMILY_VMAP_ = {
'inet': u'ipv4',
'inet6': u'ipv6'
# can add more mappings
}
_FAMILY_MAX_PREFIXLEN = {
'inet': 32,
'inet6': 128
}
for interface_details in interface_table_items:
ip_network = interface_details[0]
ip_address = ip_network.split('/')[0]
address = napalm_base.helpers.convert(
napalm_base.helpers.ip, ip_address, ip_address)
try:
interface_details_dict = dict(interface_details[1])
family_raw = interface_details_dict.get('family')
interface = py23_compat.text_type(interface_details_dict.get('interface'))
except ValueError:
continue
prefix = napalm_base.helpers.convert(int,
ip_network.split('/')[-1],
_FAMILY_MAX_PREFIXLEN.get(family_raw))
family = _FAMILY_VMAP_.get(family_raw)
if not family or not interface:
continue
if interface not in interfaces_ip.keys():
interfaces_ip[interface] = {}
if family not in interfaces_ip[interface].keys():
interfaces_ip[interface][family] = {}
if address not in interfaces_ip[interface][family].keys():
interfaces_ip[interface][family][address] = {}
interfaces_ip[interface][family][address][u'prefix_length'] = prefix
return interfaces_ip
def get_mac_address_table(self):
"""Return the MAC address table."""
mac_address_table = []
if self.device.facts.get('personality', '') in ['SWITCH']: # for EX & QFX devices
if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']: # for L2NG devices
mac_table = junos_views.junos_mac_address_table_switch_l2ng(self.device)
else:
mac_table = junos_views.junos_mac_address_table_switch(self.device)
else:
mac_table = junos_views.junos_mac_address_table(self.device)
mac_table.get()
mac_table_items = mac_table.items()
default_values = {
'mac': u'',
'interface': u'',
'vlan': 0,
'static': False,
'active': True,
'moves': 0,
'last_move': 0.0
}
for mac_table_entry in mac_table_items:
mac_entry = default_values.copy()
mac_entry.update(
{elem[0]: elem[1] for elem in mac_table_entry[1]}
)
mac = mac_entry.get('mac')
# JUNOS returns '*' for Type = Flood
if mac == '*':
continue
mac_entry['mac'] = napalm_base.helpers.mac(mac)
mac_address_table.append(mac_entry)
return mac_address_table
    def get_route_to(self, destination='', protocol=''):
        """Return route details to a specific destination, learned from a certain protocol.

        :param destination: prefix (or host) to look up — required
        :param protocol: optional protocol filter ('bgp', 'isis', 'static',
            'connected', ...); 'connected' is translated to Junos 'direct'
        :raises TypeError: when ``destination`` is not a string
        :raises CommandTimeoutException: when the query matches too many routes
        :raises CommandErrorException: on any other RPC failure
        """
        routes = {}
        if not isinstance(destination, py23_compat.string_types):
            raise TypeError('Please specify a valid destination!')
        if protocol and isinstance(destination, py23_compat.string_types):
            protocol = protocol.lower()
        if protocol == 'connected':
            protocol = 'direct'  # this is how is called on JunOS
        _COMMON_PROTOCOL_FIELDS_ = [
            'destination',
            'prefix_length',
            'protocol',
            'current_active',
            'last_active',
            'age',
            'next_hop',
            'outgoing_interface',
            'selected_next_hop',
            'preference',
            'inactive_reason',
            'routing_table'
        ]  # identifies the list of fileds common for all protocols
        _BOOLEAN_FIELDS_ = [
            'current_active',
            'selected_next_hop',
            'last_active'
        ]  # fields expected to have boolean values
        # Anything not in the common list is filtered against this map and
        # ends up under 'protocol_attributes'.
        _PROTOCOL_SPECIFIC_FIELDS_ = {
            'bgp': [
                'local_as',
                'remote_as',
                'as_path',
                'communities',
                'local_preference',
                'preference2',
                'remote_address',
                'metric',
                'metric2'
            ],
            'isis': [
                'level',
                'metric',
                'local_as'
            ]
        }
        routes_table = junos_views.junos_protocol_route_table(self.device)
        rt_kargs = {
            'destination': destination
        }
        if protocol and isinstance(destination, py23_compat.string_types):
            rt_kargs['protocol'] = protocol
        try:
            routes_table.get(**rt_kargs)
        except RpcTimeoutError:
            # on devices with milions of routes
            # in case the destination is too generic (e.g.: 10/8)
            # will take very very long to determine all routes and
            # moreover will return a huge list
            raise CommandTimeoutException(
                'Too many routes returned! Please try with a longer prefix or a specific protocol!'
            )
        except RpcError as rpce:
            if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
                raise CommandErrorException(
                    'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
            raise CommandErrorException(rpce)
        except Exception as err:
            raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))
        routes_items = routes_table.items()
        for route in routes_items:
            d = {}
            # next_hop = route[0]
            d = {elem[0]: elem[1] for elem in route[1]}
            destination = napalm_base.helpers.ip(d.pop('destination', ''))
            prefix_length = d.pop('prefix_length', 32)
            destination = '{d}/{p}'.format(
                d=destination,
                p=prefix_length
            )
            # Missing boolean fields default to False.
            d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
            as_path = d.get('as_path')
            if as_path is not None:
                d['as_path'] = as_path.split(' I ')[0]\
                    .replace('AS path:', '')\
                    .replace('I', '')\
                    .strip()
                # to be sure that contains only AS Numbers
            if d.get('inactive_reason') is None:
                d['inactive_reason'] = u''
            route_protocol = d.get('protocol').lower()
            if protocol and protocol != route_protocol:
                continue
            communities = d.get('communities')
            if communities is not None and type(communities) is not list:
                d['communities'] = [communities]
            d_keys = list(d.keys())
            # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
            all_protocol_attributes = {
                key: d.pop(key)
                for key in d_keys
                if key not in _COMMON_PROTOCOL_FIELDS_
            }
            protocol_attributes = {
                key: value for key, value in all_protocol_attributes.items()
                if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
            }
            d['protocol_attributes'] = protocol_attributes
            if destination not in routes.keys():
                routes[destination] = []
            routes[destination].append(d)
        return routes
def get_snmp_information(self):
"""Return the SNMP configuration."""
snmp_information = {}
snmp_config = junos_views.junos_snmp_config_table(self.device)
snmp_config.get()
snmp_items = snmp_config.items()
if not snmp_items:
return snmp_information
snmp_information = {
py23_compat.text_type(ele[0]): ele[1] if ele[1] else ''
for ele in snmp_items[0][1]
}
snmp_information['community'] = {}
communities_table = snmp_information.pop('communities_table')
if not communities_table:
return snmp_information
for community in communities_table.items():
community_name = py23_compat.text_type(community[0])
community_details = {
'acl': ''
}
community_details.update({
py23_compat.text_type(ele[0]): py23_compat.text_type(
ele[1] if ele[0] != 'mode'
else C.SNMP_AUTHORIZATION_MODE_MAP.get(ele[1]))
for ele in community[1]
})
snmp_information['community'][community_name] = community_details
return snmp_information
def get_probes_config(self):
    """Return the configuration of the RPM probes.

    Result shape: {probe_name: {test_name: {probe_type, target, source,
    probe_count, test_interval}}}.
    """
    probes = {}
    probes_table = junos_views.junos_rpm_probes_config_table(self.device)
    probes_table.get()
    probes_table_items = probes_table.items()
    for probe_test in probes_table_items:
        test_name = py23_compat.text_type(probe_test[0])
        test_details = {
            p[0]: p[1] for p in probe_test[1]
        }
        # convert() coerces device-returned values to the expected type,
        # falling back to the given default when the value is missing.
        probe_name = napalm_base.helpers.convert(
            py23_compat.text_type, test_details.pop('probe_name'))
        target = napalm_base.helpers.convert(
            py23_compat.text_type, test_details.pop('target', ''))
        test_interval = napalm_base.helpers.convert(int, test_details.pop('test_interval', '0'))
        probe_count = napalm_base.helpers.convert(int, test_details.pop('probe_count', '0'))
        probe_type = napalm_base.helpers.convert(
            py23_compat.text_type, test_details.pop('probe_type', ''))
        source = napalm_base.helpers.convert(
            py23_compat.text_type, test_details.pop('source_address', ''))
        if probe_name not in probes.keys():
            probes[probe_name] = {}
        probes[probe_name][test_name] = {
            'probe_type': probe_type,
            'target': target,
            'source': source,
            'probe_count': probe_count,
            'test_interval': test_interval
        }
    return probes
def get_probes_results(self):
    """Return the results of the RPM probes.

    Result shape: {probe_name: {test_name: {<test result fields>}}}.
    Float values are scaled from microseconds to milliseconds.
    """
    probes_results = {}
    probes_results_table = junos_views.junos_rpm_probes_results_table(self.device)
    probes_results_table.get()
    probes_results_items = probes_results_table.items()
    for probe_result in probes_results_items:
        probe_name = py23_compat.text_type(probe_result[0])
        test_results = {
            p[0]: p[1] for p in probe_result[1]
        }
        # last_test_loss is a counter, not a latency value: force int (default 0)
        # BEFORE the float-scaling loop below so it is never divided by 1000.
        test_results['last_test_loss'] = napalm_base.helpers.convert(
            int, test_results.pop('last_test_loss'), 0)
        for test_param_name, test_param_value in test_results.items():
            if isinstance(test_param_value, float):
                test_results[test_param_name] = test_param_value * 1e-3
                # convert from useconds to mseconds
        test_name = test_results.pop('test_name', '')
        source = test_results.get('source', u'')
        if source is None:
            # Normalise an explicit null source to the empty string.
            test_results['source'] = u''
        if probe_name not in probes_results.keys():
            probes_results[probe_name] = {}
        probes_results[probe_name][test_name] = test_results
    return probes_results
def traceroute(self,
               destination,
               source=C.TRACEROUTE_SOURCE,
               ttl=C.TRACEROUTE_TTL,
               timeout=C.TRACEROUTE_TIMEOUT,
               vrf=C.TRACEROUTE_VRF):
    """Execute traceroute and return results.

    The CLI ``traceroute`` command is built as a string and sent as a raw
    NETCONF <command> RPC. On failure returns ``{'error': ...}``; on
    success returns ``{'success': {ttl: {'probes': {idx: {...}}}}}``.
    RTT values are converted from microseconds to milliseconds.
    """
    traceroute_result = {}
    # calling form RPC does not work properly :(
    # but defined junos_route_instance_table just in case
    source_str = ''
    maxttl_str = ''
    wait_str = ''
    vrf_str = ''
    # Each optional argument maps to a CLI keyword; empty string when unset.
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        wait_str = ' wait {timeout}'.format(timeout=timeout)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
        destination=destination,
        source=source_str,
        maxttl=maxttl_str,
        wait=wait_str,
        vrf=vrf_str
    )
    traceroute_rpc = E('command', traceroute_command)
    rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    traceroute_results = rpc_reply.find('.//traceroute-results')
    traceroute_failure = napalm_base.helpers.find_txt(
        traceroute_results, 'traceroute-failure', '')
    error_message = napalm_base.helpers.find_txt(
        traceroute_results, 'rpc-error/error-message', '')
    if traceroute_failure and error_message:
        return {'error': '{}: {}'.format(traceroute_failure, error_message)}
    traceroute_result['success'] = {}
    for hop in traceroute_results.findall('hop'):
        ttl_value = napalm_base.helpers.convert(
            int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
        if ttl_value not in traceroute_result['success']:
            traceroute_result['success'][ttl_value] = {'probes': {}}
        for probe in hop.findall('probe-result'):
            probe_index = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
            # '*' marks an unanswered probe, mirroring CLI output.
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
            host_name = py23_compat.text_type(
                napalm_base.helpers.find_txt(probe, 'host-name', '*'))
            rtt = napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3  # ms
            traceroute_result['success'][ttl_value]['probes'][probe_index] = {
                'ip_address': ip_address,
                'host_name': host_name,
                'rtt': rtt
            }
    return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
         timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
    """Execute ping on the device and return the results.

    The CLI ``ping`` command is built as a string and sent as a raw NETCONF
    <command> RPC. Returns ``{'error': ...}`` on failure or 100% loss,
    otherwise ``{'success': {...}}`` with per-probe results. RTT values are
    converted from microseconds to milliseconds and rounded to 3 decimals.
    """
    ping_dict = {}
    source_str = ''
    maxttl_str = ''
    timeout_str = ''
    size_str = ''
    count_str = ''
    vrf_str = ''
    # Each optional argument maps to a CLI keyword; empty string when unset.
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        timeout_str = ' wait {timeout}'.format(timeout=timeout)
    if size:
        size_str = ' size {size}'.format(size=size)
    if count:
        count_str = ' count {count}'.format(count=count)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
        destination=destination,
        source=source_str,
        ttl=maxttl_str,
        timeout=timeout_str,
        size=size_str,
        count=count_str,
        vrf=vrf_str
    )
    ping_rpc = E('command', ping_command)
    rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    probe_summary = rpc_reply.find('.//probe-results-summary')
    if probe_summary is None:
        rpc_error = rpc_reply.find('.//rpc-error')
        return {'error': '{}'.format(
            napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
    packet_loss = napalm_base.helpers.convert(
        int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
    # rtt values are valid only if we get an ICMP reply.
    # NOTE: the original code used `is not 100`, an identity comparison that
    # only works through CPython's small-int caching; use `!=` instead.
    if packet_loss != 100:
        ping_dict['success'] = {}
        ping_dict['success']['probes_sent'] = int(
            probe_summary.findtext("probes-sent"))
        ping_dict['success']['packet_loss'] = packet_loss
        ping_dict['success'].update({
            'rtt_min': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
            'rtt_max': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
            'rtt_avg': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-average'), -1) * 1e-3), 3),
            'rtt_stddev': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
        })
        tmp = rpc_reply.find('.//ping-results')
        results_array = []
        for probe_result in tmp.findall('probe-result'):
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip,
                napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
            rtt = round(
                (napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(
                        probe_result, 'rtt'), -1) * 1e-3), 3)
            results_array.append({'ip_address': ip_address,
                                  'rtt': rtt})
        ping_dict['success'].update({'results': results_array})
    else:
        return {'error': 'Packet loss {}'.format(packet_loss)}
    return ping_dict
def get_users(self):
    """Return the configuration of the users.

    Result shape: {username: {'level': int, 'password': str,
    'sshkeys': [str, ...], ...}}. The Junos login class is mapped to a
    Cisco-style privilege level.
    """
    users = {}
    _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = {
        'super-user': 15,
        'superuser': 15,
        'operator': 5,
        'read-only': 1,
        'unauthorized': 0
    }
    _DEFAULT_USER_DETAILS = {
        'level': 0,
        'password': '',
        'sshkeys': []
    }
    users_table = junos_views.junos_users_table(self.device)
    users_table.get()
    users_items = users_table.items()
    for user_entry in users_items:
        username = user_entry[0]
        user_details = _DEFAULT_USER_DETAILS.copy()
        # Only overwrite defaults with truthy values from the device.
        user_details.update({
            d[0]: d[1] for d in user_entry[1] if d[1]
        })
        user_class = user_details.pop('class', '')
        # Stringify every remaining field; 'level' and 'sshkeys' are
        # re-assigned with their proper types right below.
        user_details = {
            key: py23_compat.text_type(user_details[key])
            for key in user_details.keys()
        }
        level = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0)
        user_details.update({
            'level': level
        })
        # Collect whichever SSH key types are present into a single list.
        user_details['sshkeys'] = [
            user_details.pop(key)
            for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
            if user_details.get(key, '')
        ]
        users[username] = user_details
    return users
def get_optics(self):
    """Return optics information (per-interface, per-lane optical levels).

    Two PyEZ tables are merged: the plain optics table (no lane info, and
    wrong data for 40G/100G) and the 40G/100G table (per-lane). Missing
    avg/min/max values are defaulted to 0.0.
    """
    optics_table = junos_views.junos_intf_optics_table(self.device)
    optics_table.get()
    optics_items = optics_table.items()
    # optics_items has no lane information, so we need to re-format data
    # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
    # but the information for 40G/100G is incorrect at this point
    # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
    optics_items_with_lane = []
    for intf_optic_item in optics_items:
        temp_list = list(intf_optic_item)
        temp_list.insert(1, u"0")
        new_intf_optic_item = tuple(temp_list)
        optics_items_with_lane.append(new_intf_optic_item)
    # Now optics_items_with_lane has all optics with lane 0 included
    # Example: ('xe-0/0/0', u'0', [ optical_values ])
    # Get optical information for 40G/100G optics
    optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
    optics_table40G.get()
    optics_40Gitems = optics_table40G.items()
    # Re-format data as before inserting lane value
    new_optics_40Gitems = []
    for item in optics_40Gitems:
        lane = item[0]
        # First element of the value list carries the interface name.
        iface = item[1].pop(0)
        new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
    # New_optics_40Gitems contains 40G/100G optics only:
    # ('et-0/0/49', u'0', [ optical_values ]),
    # ('et-0/0/49', u'1', [ optical_values ]),
    # ('et-0/0/49', u'2', [ optical_values ])
    # Remove 40G/100G optics entries with wrong information returned
    # from junos_intf_optics_table()
    iface_40G = [item[0] for item in new_optics_40Gitems]
    for intf_optic_item in optics_items_with_lane:
        iface_name = intf_optic_item[0]
        if iface_name not in iface_40G:
            new_optics_40Gitems.append(intf_optic_item)
    # New_optics_40Gitems contains all optics 10G/40G/100G with the lane
    optics_detail = {}
    for intf_optic_item in new_optics_40Gitems:
        lane = intf_optic_item[1]
        interface_name = py23_compat.text_type(intf_optic_item[0])
        optics = dict(intf_optic_item[2])
        if interface_name not in optics_detail:
            # Initialise the OpenConfig-style container once per interface.
            optics_detail[interface_name] = {}
            optics_detail[interface_name]['physical_channels'] = {}
            optics_detail[interface_name]['physical_channels']['channel'] = []
        # Defaulting avg, min, max values to 0.0 since device does not
        # return these values
        intf_optics = {
            'index': int(lane),
            'state': {
                'input_power': {
                    'instant': (
                        float(optics['input_power'])
                        if optics['input_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'output_power': {
                    'instant': (
                        float(optics['output_power'])
                        if optics['output_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'laser_bias_current': {
                    'instant': (
                        float(optics['laser_bias_current'])
                        if optics['laser_bias_current'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                }
            }
        }
        optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
    return optics_detail
def get_config(self, retrieve='all'):
    """Return the device configuration as text.

    :param retrieve: 'candidate', 'running' or 'all'. Junos has no
        distinct startup configuration, so 'startup' is always ''.
    :returns: dict with 'startup', 'running' and 'candidate' keys.
    """
    result = {
        'startup': '',
        'running': '',
        'candidate': ''
    }

    def _fetch(database):
        # Ask the device for the text rendering of the given config DB.
        options = {'format': 'text', 'database': database}
        reply = self.device.rpc.get_config(filter_xml=None, options=options)
        return py23_compat.text_type(reply.text)

    if retrieve in ('candidate', 'all'):
        result['candidate'] = _fetch('candidate')
    if retrieve in ('running', 'all'):
        result['running'] = _fetch('committed')
    return result
def get_network_instances(self, name=''):
    """Return routing instances (VRFs) configured on the device.

    :param name: optional instance name; when set, only that instance is
        returned (empty dict if it does not exist).
    :returns: {instance_name: {'name', 'type', 'state', 'interfaces'}}
        following the OpenConfig network-instance structure.
    """
    network_instances = {}
    ri_table = junos_views.junos_nw_instances_table(self.device)
    ri_table.get()
    ri_entries = ri_table.items()
    vrf_interfaces = []
    for ri_entry in ri_entries:
        ri_name = py23_compat.text_type(ri_entry[0])
        ri_details = {
            d[0]: d[1] for d in ri_entry[1]
        }
        ri_type = ri_details['instance_type']
        if ri_type is None:
            ri_type = 'default'
        ri_rd = ri_details['route_distinguisher']
        ri_interfaces = ri_details['interfaces']
        # A single interface comes back as a scalar; normalise to a list.
        if not isinstance(ri_interfaces, list):
            ri_interfaces = [ri_interfaces]
        network_instances[ri_name] = {
            'name': ri_name,
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
            'state': {
                'route_distinguisher': ri_rd if ri_rd else ''
            },
            'interfaces': {
                'interface': {
                    intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                }
            }
        }
        vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
    # Interfaces not assigned to any explicit instance belong to 'default'.
    all_interfaces = self.get_interfaces().keys()
    default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
    if 'default' not in network_instances:
        network_instances['default'] = {
            'name': 'default',
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
            'state': {
                'route_distinguisher': ''
            },
            'interfaces': {
                'interface': {
                    py23_compat.text_type(intrf_name): {}
                    for intrf_name in default_interfaces
                }
            }
        }
    if not name:
        return network_instances
    if name not in network_instances:
        return {}
    return {name: network_instances[name]}
|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver._get_address_family | python | def _get_address_family(table):
address_family_mapping = {
'inet': 'ipv4',
'inet6': 'ipv6',
'inetflow': 'flow'
}
family = table.split('.')[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = family
return address_family | Function to derive address family from a junos table name.
:params table: The name of the routing table
class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
    """
    Initialise JunOS driver.

    Optional args:
        * config_lock (True/False): lock configuration DB after the connection is established.
        * port (int): custom port
        * key_file (string): SSH key file path
        * keepalive (int): Keepalive interval
        * ignore_warning (boolean): not generate warning exceptions
    """
    self.hostname = hostname
    self.username = username
    self.password = password
    self.timeout = timeout
    # True while a 'replace' (vs 'merge') load is pending; drives which
    # exception type _load_candidate raises on errors.
    self.config_replace = False
    # Tracks whether we currently hold the configuration DB lock.
    self.locked = False
    # Get optional arguments
    if optional_args is None:
        optional_args = {}
    self.config_lock = optional_args.get('config_lock', False)
    self.port = optional_args.get('port', 22)
    self.key_file = optional_args.get('key_file', None)
    self.keepalive = optional_args.get('keepalive', 30)
    self.ssh_config_file = optional_args.get('ssh_config_file', None)
    self.ignore_warning = optional_args.get('ignore_warning', False)
    # Only pass the private-key path to PyEZ when one was supplied.
    if self.key_file:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             ssh_private_key_file=self.key_file,
                             ssh_config=self.ssh_config_file,
                             port=self.port)
    else:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             port=self.port,
                             ssh_config=self.ssh_config_file)
    self.profile = ["junos"]
def open(self):
    """Open the connection with the device.

    Establishes the NETCONF session, binds the PyEZ Config utility as
    ``self.device.cu`` and, when ``config_lock`` was requested, locks the
    configuration DB immediately.
    """
    try:
        self.device.open()
    except ConnectTimeoutError as cte:
        raise ConnectionException(cte.message)
    self.device.timeout = self.timeout
    # Enable SSH-level keepalives so idle sessions are not dropped.
    self.device._conn._session.transport.set_keepalive(self.keepalive)
    if hasattr(self.device, "cu"):
        # make sure to remove the cu attr from previous session
        # ValueError: requested attribute name cu already exists
        del self.device.cu
    self.device.bind(cu=Config)
    if self.config_lock:
        self._lock()
def close(self):
    """Close the connection, releasing the config lock first if we hold it."""
    if self.config_lock:
        self._unlock()
    self.device.close()
def _lock(self):
    """Lock the config DB (idempotent: no-op when already locked)."""
    if not self.locked:
        self.device.cu.lock()
        self.locked = True
def _unlock(self):
    """Unlock the config DB (idempotent: no-op when not locked)."""
    if self.locked:
        self.device.cu.unlock()
        self.locked = False
def _rpc(self, get, child=None, **kwargs):
    """
    This allows you to construct an arbitrary RPC call to retrieve common stuff. For example:
    Configuration: get: "<get-configuration/>"
    Interface information: get: "<get-interface-information/>"
    A particular interface information:
        get: "<get-interface-information/>"
        child: "<interface-name>ge-0/0/0</interface-name>"

    Returns the RPC reply serialised back to an XML string.
    (``**kwargs`` is accepted for call-compatibility but not used here.)
    """
    rpc = etree.fromstring(get)
    if child:
        rpc.append(etree.fromstring(child))
    response = self.device.execute(rpc)
    return etree.tostring(response)
def is_alive(self):
    """Return {'is_alive': bool} reflecting the session health."""
    # evaluate the state of the underlying SSH connection
    # and also the NETCONF status from PyEZ
    return {
        'is_alive': self.device._conn._session.transport.is_active() and self.device.connected
    }
@staticmethod
def _is_json_format(config):
try:
_ = json.loads(config) # noqa
except (TypeError, ValueError):
return False
return True
def _detect_config_format(self, config):
fmt = 'text'
set_action_matches = [
'set',
'activate',
'deactivate',
'annotate',
'copy',
'delete',
'insert',
'protect',
'rename',
'unprotect',
]
if config.strip().startswith('<'):
return 'xml'
elif config.strip().split(' ')[0] in set_action_matches:
return 'set'
elif self._is_json_format(config):
return 'json'
return fmt
def _load_candidate(self, filename, config, overwrite):
    """Load a candidate configuration from a file or a string.

    :param filename: path to a configuration file; when None, *config* is used.
    :param config: configuration text (any format _detect_config_format knows).
    :param overwrite: True to replace the whole config, False to merge.
    :raises ReplaceConfigException / MergeConfigException: on load errors,
        depending on the pending operation type (self.config_replace).
    """
    if filename is None:
        configuration = config
    else:
        with open(filename) as f:
            configuration = f.read()
    if not self.config_lock:
        # if not locked during connection time
        # will try to lock it if not already acquired
        self._lock()
        # and the device will be locked till first commit/rollback
    try:
        fmt = self._detect_config_format(configuration)
        if fmt == "xml":
            # PyEZ expects an lxml element for XML payloads, not a string.
            configuration = etree.XML(configuration)
        self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                            ignore_warning=self.ignore_warning)
    except ConfigLoadError as e:
        if self.config_replace:
            raise ReplaceConfigException(e.errs)
        else:
            raise MergeConfigException(e.errs)
def load_replace_candidate(self, filename=None, config=None):
    """Load a candidate config that fully replaces the running config."""
    self.config_replace = True
    self._load_candidate(filename, config, True)
def load_merge_candidate(self, filename=None, config=None):
    """Load a candidate config that is merged into the running config."""
    self.config_replace = False
    self._load_candidate(filename, config, False)
def compare_config(self):
    """Return the device-generated diff between candidate and running config.

    :returns: the stripped diff text, or '' when there are no changes.
    """
    diff = self.device.cu.diff()
    # PyEZ returns None when candidate and running configs are identical.
    return '' if diff is None else diff.strip()
def commit_config(self):
    """Commit the candidate configuration.

    Releases the config lock afterwards unless the lock was requested for
    the whole session (config_lock=True).
    """
    self.device.cu.commit(ignore_warning=self.ignore_warning)
    if not self.config_lock:
        self._unlock()
def discard_config(self):
    """Discard changes (rollback 0).

    Releases the config lock afterwards unless the lock was requested for
    the whole session (config_lock=True).
    """
    self.device.cu.rollback(rb_id=0)
    if not self.config_lock:
        self._unlock()
def rollback(self):
    """Rollback to previous commit (rollback 1) and commit the result."""
    self.device.cu.rollback(rb_id=1)
    # The rollback only loads the previous config as candidate; it still
    # has to be committed to take effect.
    self.commit_config()
def get_facts(self):
    """Return facts of the device (vendor, model, serial, OS, uptime, interfaces)."""
    output = self.device.facts
    # PyEZ may not report uptime; NAPALM uses -1 for "unknown".
    uptime = self.device.uptime or -1
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    interface_list = interfaces.keys()
    return {
        'vendor': u'Juniper',
        'model': py23_compat.text_type(output['model']),
        'serial_number': py23_compat.text_type(output['serialnumber']),
        'os_version': py23_compat.text_type(output['version']),
        'hostname': py23_compat.text_type(output['hostname']),
        'fqdn': py23_compat.text_type(output['fqdn']),
        'uptime': uptime,
        'interface_list': interface_list
    }
def get_interfaces(self):
    """Return interfaces details.

    Result shape: {iface: {'is_up', 'is_enabled', 'description',
    'last_flapped', 'mac_address', 'speed'}}. Speed is normalised to Mbps
    (-1 when it cannot be parsed).
    """
    result = {}
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    # convert all the tuples to our pre-defined dict structure
    for iface in interfaces.keys():
        result[iface] = {
            'is_up': interfaces[iface]['is_up'],
            'is_enabled': interfaces[iface]['is_enabled'],
            'description': (interfaces[iface]['description'] or u''),
            'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
            'mac_address': napalm_base.helpers.convert(
                napalm_base.helpers.mac,
                interfaces[iface]['mac_address'],
                py23_compat.text_type(interfaces[iface]['mac_address'])),
            'speed': -1
        }
        # result[iface]['last_flapped'] = float(result[iface]['last_flapped'])
        # Speed comes back as e.g. '1000mbps' or '10Gbps'; split into the
        # numeric part and the unit suffix.
        match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
        if match is None:
            continue
        speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
        if speed_value == -1:
            continue
        speed_unit = match.group(2)
        if speed_unit.lower() == 'gbps':
            # Normalise Gbps to Mbps.
            speed_value *= 1000
        result[iface]['speed'] = speed_value
    return result
def get_interfaces_counters(self):
    """Return per-interface traffic/error counters.

    Counters the device does not report are normalised to -1.
    """
    counter_table = junos_views.junos_iface_counter_table(self.device)
    counter_table.get()
    return {
        iface: {
            name: (-1 if value is None else value)
            for name, value in counters
        }
        for iface, counters in counter_table.items()
    }
def get_environment(self):
    """Return environment details (power, fans, temperature, CPU, memory)."""
    environment = junos_views.junos_enviroment_table(self.device)
    routing_engine = junos_views.junos_routing_engine_table(self.device)
    temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
    power_supplies = junos_views.junos_pem_table(self.device)
    environment.get()
    routing_engine.get()
    temperature_thresholds.get()
    environment_data = {}
    current_class = None
    for sensor_object, object_data in environment.items():
        structured_object_data = {k: v for k, v in object_data}
        if structured_object_data['class']:
            # If current object has a 'class' defined, store it for use
            # on subsequent unlabeled lines.
            current_class = structured_object_data['class']
        else:
            # Juniper doesn't label the 2nd+ lines of a given class with a
            # class name. In that case, we use the most recent class seen.
            structured_object_data['class'] = current_class
        if structured_object_data['class'] == 'Power':
            # Create a dict for the 'power' key
            try:
                environment_data['power'][sensor_object] = {}
            except KeyError:
                environment_data['power'] = {}
                environment_data['power'][sensor_object] = {}
            # -1.0 marks "unknown"; corrected from the PEM table below when
            # the platform supports it.
            environment_data['power'][sensor_object]['capacity'] = -1.0
            environment_data['power'][sensor_object]['output'] = -1.0
        if structured_object_data['class'] == 'Fans':
            # Create a dict for the 'fans' key
            try:
                environment_data['fans'][sensor_object] = {}
            except KeyError:
                environment_data['fans'] = {}
                environment_data['fans'][sensor_object] = {}
        status = structured_object_data['status']
        env_class = structured_object_data['class']
        if (status == 'OK' and env_class == 'Power'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['power'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Power'):
            environment_data['power'][sensor_object]['status'] = False
        elif (status == 'OK' and env_class == 'Fans'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['fans'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Fans'):
            environment_data['fans'][sensor_object]['status'] = False
        # NOTE(review): this inner loop re-evaluates the current sensor
        # against every threshold row; it keys off the *sensor's* class
        # and name, not temperature_object — looks intentional but verify.
        for temperature_object, temperature_data in temperature_thresholds.items():
            structured_temperature_data = {k: v for k, v in temperature_data}
            if structured_object_data['class'] == 'Temp':
                # Create a dict for the 'temperature' key
                try:
                    environment_data['temperature'][sensor_object] = {}
                except KeyError:
                    environment_data['temperature'] = {}
                    environment_data['temperature'][sensor_object] = {}
                # Check we have a temperature field in this class (See #66)
                if structured_object_data['temperature']:
                    environment_data['temperature'][sensor_object]['temperature'] = \
                        float(structured_object_data['temperature'])
                # Set a default value (False) to the key is_critical and is_alert
                environment_data['temperature'][sensor_object]['is_alert'] = False
                environment_data['temperature'][sensor_object]['is_critical'] = False
                # Check if the working temperature is equal to or higher than alerting threshold
                temp = structured_object_data['temperature']
                if structured_temperature_data['red-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_critical'] = True
                    environment_data['temperature'][sensor_object]['is_alert'] = True
                elif structured_temperature_data['yellow-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_alert'] = True
    # Try to correct Power Supply information
    pem_table = dict()
    try:
        power_supplies.get()
    except RpcError:
        # Not all platforms have support for this
        pass
    else:
        # Format PEM information and correct capacity and output values
        for pem in power_supplies.items():
            pem_name = pem[0].replace("PEM", "Power Supply")
            pem_table[pem_name] = dict(pem[1])
            environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
            environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
    for routing_engine_object, routing_engine_data in routing_engine.items():
        structured_routing_engine_data = {k: v for k, v in routing_engine_data}
        # Create dicts for 'cpu' and 'memory'.
        try:
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        except KeyError:
            environment_data['cpu'] = {}
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        # Calculate the CPU usage by using the CPU idle value.
        environment_data['cpu'][routing_engine_object]['%usage'] = \
            100.0 - structured_routing_engine_data['cpu-idle']
        try:
            environment_data['memory']['available_ram'] = \
                int(structured_routing_engine_data['memory-dram-size'])
        except ValueError:
            # Some platforms report e.g. '2048 MB'; keep only the digits.
            environment_data['memory']['available_ram'] = \
                int(
                    ''.join(
                        i for i in structured_routing_engine_data['memory-dram-size']
                        if i.isdigit()
                    )
                )
        # Junos gives us RAM in %, so calculation has to be made.
        # Sadly, bacause of this, results are not 100% accurate to the truth.
        environment_data['memory']['used_ram'] = \
            int(round(environment_data['memory']['available_ram'] / 100.0 *
                      structured_routing_engine_data['memory-buffer-utilization']))
    return environment_data
@staticmethod
def _parse_route_stats(self, neighbor):
data = {
'ipv4': {
'received_prefixes': -1,
'accepted_prefixes': -1,
'sent_prefixes': -1
},
'ipv6': {
'received_prefixes': -1,
'accepted_prefixes': -1,
'sent_prefixes': -1
}
}
if not neighbor['is_up']:
return data
elif isinstance(neighbor['tables'], list):
if isinstance(neighbor['sent_prefixes'], int):
# We expect sent_prefixes to be a list, but sometimes it
# is of type int. Therefore convert attribute to list
neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
for idx, table in enumerate(neighbor['tables']):
family = self._get_address_family(table)
data[family] = {}
data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
if 'in sync' in neighbor['send-state'][idx]:
data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
else:
data[family]['sent_prefixes'] = 0
else:
family = self._get_address_family(neighbor['tables'])
data[family] = {}
data[family]['received_prefixes'] = neighbor['received_prefixes']
data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
data[family]['sent_prefixes'] = neighbor['sent_prefixes']
return data
@staticmethod
def _parse_value(value):
    """Normalise a raw table value: strings to unicode, None to u''."""
    if isinstance(value, py23_compat.string_types):
        return py23_compat.text_type(value)
    elif value is None:
        return u''
    else:
        # Non-string values (ints, bools, ...) are returned untouched.
        return value
def get_bgp_neighbors(self):
    """Return BGP neighbors details.

    Result shape: {instance: {'router_id': str, 'peers': {ip: {...}}}},
    where the master instance is exposed as 'global'. Instances with no
    peers are filtered out of the final result.
    """
    bgp_neighbor_data = {}
    default_neighbor_details = {
        'local_as': 0,
        'remote_as': 0,
        'remote_id': '',
        'is_up': False,
        'is_enabled': False,
        'description': '',
        'uptime': 0,
        'address_family': {}
    }
    keys = default_neighbor_details.keys()
    uptime_table = junos_views.junos_bgp_uptime_table(self.device)
    bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
    uptime_table_lookup = {}

    def _get_uptime_table(instance):
        # Cache per-instance uptime lookups to avoid repeated RPCs.
        if instance not in uptime_table_lookup:
            uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
        return uptime_table_lookup[instance]

    def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
        '''
        Make sure to execute a simple request whenever using
        junos > 13. This is a helper used to avoid code redundancy
        and reuse the function also when iterating through the list
        BGP neighbors under a specific routing instance,
        also when the device is capable to return the routing
        instance name at the BGP neighbor level.
        '''
        for bgp_neighbor in neighbor_data:
            # Peer keys look like '1.2.3.4+179'; strip the port suffix.
            peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # not instance, means newer Junos version,
                # as we request everything in a single request
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                # instance is explicitly requests,
                # thus it's an old Junos, so we retrieve the BGP neighbors
                # under a certain routing instance
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            if instance_name not in bgp_neighbor_data:
                bgp_neighbor_data[instance_name] = {}
            if 'router_id' not in bgp_neighbor_data[instance_name]:
                # we only need to set this once
                bgp_neighbor_data[instance_name]['router_id'] = \
                    py23_compat.text_type(neighbor_details.get('local_id', ''))
            peer = {
                key: self._parse_value(value)
                for key, value in neighbor_details.items()
                if key in keys
            }
            peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
            peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
            peer['address_family'] = self._parse_route_stats(neighbor_details)
            if 'peers' not in bgp_neighbor_data[instance_name]:
                bgp_neighbor_data[instance_name]['peers'] = {}
            bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
            if not uptime_table_items:
                uptime_table_items = _get_uptime_table(instance)
            for neighbor, uptime in uptime_table_items:
                if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                    bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]
    # Commenting out the following sections, till Junos
    # will provide a way to identify the routing instance name
    # from the details of the BGP neighbor
    # currently, there are Junos 15 version having a field called `peer_fwd_rti`
    # but unfortunately, this is not consistent.
    # Junos 17 might have this fixed, but this needs to be revisited later.
    # In the definition below, `old_junos` means a version that does not provide
    # the forwarding RTI information.
    #
    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device).get()
    for instance, instance_data in instances.items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        bgp_neighbor_data[instance] = {'peers': {}}
        instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
        uptime_table_items = uptime_table.get(instance=instance).items()
        _get_bgp_neighbors_core(instance_neighbors,
                                instance=instance,
                                uptime_table_items=uptime_table_items)
    # If the OS provides the `peer_fwd_rti` or any way to identify the
    # rotuing instance name (see above), the performances of this getter
    # can be significantly improved, as we won't execute one request
    # for each an every RT.
    # However, this improvement would only be beneficial for multi-VRF envs.
    #
    # else:
    #     instance_neighbors = bgp_neighbors_table.get().items()
    #     _get_bgp_neighbors_core(instance_neighbors)
    # Drop instances that ended up with no peers.
    bgp_tmp_dict = {}
    for k, v in bgp_neighbor_data.items():
        if bgp_neighbor_data[k]['peers']:
            bgp_tmp_dict[k] = v
    return bgp_tmp_dict
def get_lldp_neighbors(self):
    """Return LLDP neighbors details.

    Result shape: {local_interface: [{neighbor fields}, ...]}. Returns {}
    (after logging) when the LLDP RPC is unavailable or fails.
    """
    lldp = junos_views.junos_lldp_table(self.device)
    try:
        lldp.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    result = lldp.items()
    neighbors = {}
    for neigh in result:
        if neigh[0] not in neighbors.keys():
            neighbors[neigh[0]] = []
        neighbors[neigh[0]].append({x[0]: py23_compat.text_type(x[1]) for x in neigh[1]})
    return neighbors
def get_lldp_neighbors_detail(self, interface=''):
    """Detailed view of the LLDP neighbors.

    NOTE(review): the ``interface`` argument is currently not used as a
    filter; details for every interface with LLDP neighbors are returned.

    :param interface: unused filter argument (kept for API compatibility).
    :return: dict keyed by interface name, each value a list of neighbor
        detail dictionaries; empty dict if the RPC fails.
    """
    lldp_neighbors = {}
    lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
    try:
        lldp_table.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    # reuse the table fetched above instead of issuing a second,
    # redundant RPC just to enumerate the interface names
    interfaces = lldp_table.keys()
    # get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
    # and SRX Series is get-lldp-interface-neighbors-information,
    # and rpc for M, MX, and T Series is get-lldp-interface-neighbors
    # ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
    # ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
    # hoist the personality check out of the per-interface loop
    old_style = self.device.facts.get('personality') in ('MX', 'M', 'T')
    if old_style:
        lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
    else:
        lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
    # use a name distinct from the `interface` parameter to avoid shadowing
    for iface in interfaces:
        if old_style:
            lldp_table.get(interface_device=iface)
        else:
            lldp_table.get(interface_name=iface)
        for item in lldp_table:
            if iface not in lldp_neighbors.keys():
                lldp_neighbors[iface] = []
            lldp_neighbors[iface].append({
                'parent_interface': item.parent_interface,
                'remote_port': item.remote_port,
                'remote_chassis_id': napalm_base.helpers.convert(
                    napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
                'remote_port_description': napalm_base.helpers.convert(
                    py23_compat.text_type, item.remote_port_description),
                'remote_system_name': item.remote_system_name,
                'remote_system_description': item.remote_system_description,
                'remote_system_capab': item.remote_system_capab,
                'remote_system_enable_capab': item.remote_system_enable_capab
            })
    return lldp_neighbors
def cli(self, commands):
    """Execute raw CLI commands and returns their output.

    :param commands: list of CLI command strings; pipe modifiers
        (``| match``, ``| count`` ...) are allowed, but ``| save`` is
        stripped before the command is sent to the device.
    :return: dict mapping each original command string to its text output.
    :raises TypeError: if ``commands`` is not a list.
    """
    cli_output = {}

    # The helpers below emulate Junos CLI pipe modifiers in Python,
    # for devices/outputs where the device itself did not apply them.
    def _count(txt, none):  # Second arg for consistency only. noqa
        '''
        Return the exact output, as Junos displays
        e.g.:
        > show system processes extensive | match root | count
        Count: 113 lines
        '''
        count = len(txt.splitlines())
        return 'Count: {count} lines'.format(count=count)

    def _trim(txt, length):
        '''
        Trim specified number of columns from start of line.
        '''
        try:
            newlines = []
            for line in txt.splitlines():
                newlines.append(line[int(length):])
            return '\n'.join(newlines)
        except ValueError:
            # non-numeric length: return the text unmodified
            return txt

    def _except(txt, pattern):
        '''
        Show only text that does not match a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        unmatched = [
            line for line in txt.splitlines()
            if not re.search(rgx, line, re.I)
        ]
        return '\n'.join(unmatched)

    def _last(txt, length):
        '''
        Display end of output only.
        '''
        try:
            return '\n'.join(
                txt.splitlines()[(-1)*int(length):]
            )
        except ValueError:
            # non-numeric length: return the text unmodified
            return txt

    def _match(txt, pattern):
        '''
        Show only text that matches a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        matched = [
            line for line in txt.splitlines()
            if re.search(rgx, line, re.I)
        ]
        return '\n'.join(matched)

    def _find(txt, pattern):
        '''
        Search for first occurrence of pattern.
        '''
        rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
        match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
        if match:
            return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
        else:
            return '\nPattern not found'

    def _process_pipe(cmd, txt):
        '''
        Process CLI output from Juniper device that
        doesn't allow piping the output.
        '''
        # NOTE(review): the pipe emulation below only runs when the device
        # returned None; if that ever happens, `txt` is None when handed to
        # the _OF_MAP helpers — confirm device.cli() can return None here.
        if txt is not None:
            return txt
        _OF_MAP = OrderedDict()
        _OF_MAP['except'] = _except
        _OF_MAP['match'] = _match
        _OF_MAP['last'] = _last
        _OF_MAP['trim'] = _trim
        _OF_MAP['count'] = _count
        _OF_MAP['find'] = _find
        # the operations order matter in this case!
        exploded_cmd = cmd.split('|')
        pipe_oper_args = {}
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            pipe_args = ''.join(exploded_pipe[1:2])
            # will not throw error when there's no arg
            pipe_oper_args[pipe_oper] = pipe_args
        for oper in _OF_MAP.keys():
            # to make sure the operation sequence is correct
            if oper not in pipe_oper_args.keys():
                continue
            txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
        return txt

    if not isinstance(commands, list):
        raise TypeError('Please enter a valid list of commands!')
    _PIPE_BLACKLIST = ['save']
    # Preprocessing to avoid forbidden commands
    for command in commands:
        exploded_cmd = command.split('|')
        command_safe_parts = []
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            if pipe_oper in _PIPE_BLACKLIST:
                # drop blacklisted pipe modifiers (e.g. `| save file`)
                continue
            pipe_args = ''.join(exploded_pipe[1:2])
            safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                              args=pipe_args)
            command_safe_parts.append(safe_pipe)
        # rebuild the command from the base and only the allowed pipes
        safe_command = exploded_cmd[0] if not command_safe_parts else\
            '{base} | {pipes}'.format(base=exploded_cmd[0],
                                      pipes=' | '.join(command_safe_parts))
        raw_txt = self.device.cli(safe_command, warning=False)
        # key the output by the ORIGINAL command, as the caller typed it
        cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
            _process_pipe(command, raw_txt))
    return cli_output
def get_bgp_config(self, group='', neighbor=''):
    """Return BGP configuration.

    :param group: optional BGP group name; when set, only that group is
        retrieved from the device.
    :param neighbor: optional neighbor IP used to filter the peers of the
        selected group; it is ignored (reset to '') when no group is given.
    :return: dict keyed by group name; each group carries its fields,
        a nested ``prefix_limit`` dict and a ``neighbors`` dict.
    """
    def update_dict(d, u):  # for deep dictionary update
        # recursively merges mapping `u` into mapping `d`
        for k, v in u.items():
            if isinstance(d, collections.Mapping):
                if isinstance(v, collections.Mapping):
                    r = update_dict(d.get(k, {}), v)
                    d[k] = r
                else:
                    d[k] = u[k]
            else:
                d = {k: u[k]}
        return d

    def build_prefix_limit(**args):
        """
        Transform the elements of a dictionary into nested dictionaries.

        Example:
            {
                'inet_unicast_limit': 500,
                'inet_unicast_teardown_threshold': 95,
                'inet_unicast_teardown_timeout': 5
            }
        becomes:
            {
                'inet': {
                    'unicast': {
                        'limit': 500,
                        'teardown': {
                            'threshold': 95,
                            'timeout': 5
                        }
                    }
                }
            }
        """
        prefix_limit = {}
        for key, value in args.items():
            key_levels = key.split('_')
            length = len(key_levels)-1
            temp_dict = {
                key_levels[length]: value
            }
            # wrap the innermost value in one dict level per name segment
            for index in reversed(range(length)):
                level = key_levels[index]
                temp_dict = {level: temp_dict}
            update_dict(prefix_limit, temp_dict)
        return prefix_limit

    # fields shared by groups and peers, with their expected Python types
    _COMMON_FIELDS_DATATYPE_ = {
        'description': py23_compat.text_type,
        'local_address': py23_compat.text_type,
        'local_as': int,
        'remote_as': int,
        'import_policy': py23_compat.text_type,
        'export_policy': py23_compat.text_type,
        'inet_unicast_limit_prefix_limit': int,
        'inet_unicast_teardown_threshold_prefix_limit': int,
        'inet_unicast_teardown_timeout_prefix_limit': int,
        'inet_unicast_novalidate_prefix_limit': int,
        'inet_flow_limit_prefix_limit': int,
        'inet_flow_teardown_threshold_prefix_limit': int,
        'inet_flow_teardown_timeout_prefix_limit': int,
        'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
        'inet6_unicast_limit_prefix_limit': int,
        'inet6_unicast_teardown_threshold_prefix_limit': int,
        'inet6_unicast_teardown_timeout_prefix_limit': int,
        'inet6_unicast_novalidate_prefix_limit': int,
        'inet6_flow_limit_prefix_limit': int,
        'inet6_flow_teardown_threshold_prefix_limit': int,
        'inet6_flow_teardown_timeout_prefix_limit': int,
        'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
    }
    # peer-only fields
    _PEER_FIELDS_DATATYPE_MAP_ = {
        'authentication_key': py23_compat.text_type,
        'route_reflector_client': bool,
        'nhs': bool
    }
    _PEER_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )
    # group-only fields
    _GROUP_FIELDS_DATATYPE_MAP_ = {
        'type': py23_compat.text_type,
        'apply_groups': list,
        'remove_private_as': bool,
        'multipath': bool,
        'multihop_ttl': int
    }
    _GROUP_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )
    # default value emitted for each datatype when the device has no value
    _DATATYPE_DEFAULT_ = {
        py23_compat.text_type: '',
        int: 0,
        bool: False,
        list: []
    }
    bgp_config = {}
    if group:
        bgp = junos_views.junos_bgp_config_group_table(self.device)
        bgp.get(group=group)
    else:
        bgp = junos_views.junos_bgp_config_table(self.device)
        bgp.get()
        neighbor = ''  # if no group is set, no neighbor should be set either
    bgp_items = bgp.items()
    if neighbor:
        neighbor_ip = napalm_base.helpers.ip(neighbor)
    for bgp_group in bgp_items:
        bgp_group_name = bgp_group[0]
        bgp_group_details = bgp_group[1]
        # seed the group with defaults for all non-prefix-limit fields
        bgp_config[bgp_group_name] = {
            field: _DATATYPE_DEFAULT_.get(datatype)
            for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
            if '_prefix_limit' not in field
        }
        for elem in bgp_group_details:
            # first pass: non-prefix-limit fields that have a value
            if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                continue
            datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
            default = _DATATYPE_DEFAULT_.get(datatype)
            key = elem[0]
            value = elem[1]
            if key in ['export_policy', 'import_policy']:
                # multiple policies are flattened to one space-separated string
                if isinstance(value, list):
                    value = ' '.join(value)
            if key == 'local_address':
                value = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, value, value)
            if key == 'neighbors':
                # NOTE(review): bgp_group_peers is only bound when the group
                # has a 'neighbors' field — confirm the view always emits it,
                # otherwise the loop below raises NameError.
                bgp_group_peers = value
                continue
            bgp_config[bgp_group_name].update({
                key: napalm_base.helpers.convert(datatype, value, default)
            })
        prefix_limit_fields = {}
        for elem in bgp_group_details:
            # second pass: prefix-limit fields, nested via build_prefix_limit
            if '_prefix_limit' in elem[0] and elem[1] is not None:
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                prefix_limit_fields.update({
                    elem[0].replace('_prefix_limit', ''):
                        napalm_base.helpers.convert(datatype, elem[1], default)
                })
        bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
        if 'multihop' in bgp_config[bgp_group_name].keys():
            # Delete 'multihop' key from the output
            del bgp_config[bgp_group_name]['multihop']
            if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                # Set ttl to default value 64
                bgp_config[bgp_group_name]['multihop_ttl'] = 64
        bgp_config[bgp_group_name]['neighbors'] = {}
        for bgp_group_neighbor in bgp_group_peers.items():
            bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
            # NOTE(review): this compares against the raw `neighbor` string
            # while the break below compares against `neighbor_ip` — confirm
            # whether the filter should use the normalised address here too.
            if neighbor and bgp_peer_address != neighbor:
                continue  # if filters applied, jump over all other neighbors
            bgp_group_details = bgp_group_neighbor[1]
            # seed the peer with defaults for all non-prefix-limit fields
            bgp_peer_details = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                bgp_peer_details.update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
                bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['local_as'])
                bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['remote_as'])
                if key == 'cluster':
                    # presence of a cluster ID implies an RR client
                    bgp_peer_details['route_reflector_client'] = True
                    # we do not want cluster in the output
                    del bgp_peer_details['cluster']
            if 'cluster' in bgp_config[bgp_group_name].keys():
                # group-level cluster ID also marks the peer as RR client
                bgp_peer_details['route_reflector_client'] = True
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
            if neighbor and bgp_peer_address == neighbor_ip:
                break  # found the desired neighbor
        if 'cluster' in bgp_config[bgp_group_name].keys():
            # we do not want cluster in the output
            del bgp_config[bgp_group_name]['cluster']
    return bgp_config
def get_bgp_neighbors_detail(self, neighbor_address=''):
    """Detailed view of the BGP neighbors operational data.

    :param neighbor_address: optional neighbor IP passed down to the
        PyEZ table to restrict the query to a single peer.
    :return: dict keyed by routing-instance name ('global' for master),
        then by remote AS number, each value a list of neighbor detail
        dictionaries.
    """
    bgp_neighbors = {}
    # template of the returned per-neighbor dictionary; -1 / 0 / '' mark
    # fields the device did not report
    default_neighbor_details = {
        'up': False,
        'local_as': 0,
        'remote_as': 0,
        'router_id': u'',
        'local_address': u'',
        'routing_table': u'',
        'local_address_configured': False,
        'local_port': 0,
        'remote_address': u'',
        'remote_port': 0,
        'multihop': False,
        'multipath': False,
        'remove_private_as': False,
        'import_policy': u'',
        'export_policy': u'',
        'input_messages': -1,
        'output_messages': -1,
        'input_updates': -1,
        'output_updates': -1,
        'messages_queued_out': -1,
        'connection_state': u'',
        'previous_connection_state': u'',
        'last_event': u'',
        'suppress_4byte_as': False,
        'local_as_prepend': False,
        'holdtime': 0,
        'configured_holdtime': 0,
        'keepalive': 0,
        'configured_keepalive': 0,
        'active_prefix_count': -1,
        'received_prefix_count': -1,
        'accepted_prefix_count': -1,
        'suppressed_prefix_count': -1,
        'advertised_prefix_count': -1,
        'flap_count': 0
    }
    # maps Junos `bgp-option` tokens to the boolean fields we expose
    OPTION_KEY_MAP = {
        'RemovePrivateAS': 'remove_private_as',
        'Multipath': 'multipath',
        'Multihop': 'multihop',
        'AddressFamily': 'local_address_configured'
        # 'AuthKey' : 'authentication_key_set'
        # but other vendors do not specify if auth key is set
        # other options:
        # Preference, HoldTime, Ttl, LogUpDown, Refresh
    }

    def _bgp_iter_core(neighbor_data, instance=None):
        '''
        Iterate over a list of neighbors.
        For older junos, the routing instance is not specified inside the
        BGP neighbors XML, therefore we need to use a super sub-optimal structure
        as in get_bgp_neighbors: iterate through the list of network instances
        then execute one request for each and every routing instance.
        For newer junos, this is not necessary as the routing instance is available
        and we can get everything solve in a single request.
        '''
        for bgp_neighbor in neighbor_data:
            remote_as = int(bgp_neighbor[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # older Junos path: take the instance from the XML field
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            options = neighbor_details.pop('options', '')
            # NOTE(review): `str` here skips unicode strings on Python 2 —
            # confirm whether py23_compat.string_types was intended.
            if isinstance(options, str):
                options_list = options.split()
                for option in options_list:
                    key = OPTION_KEY_MAP.get(option)
                    if key is not None:
                        neighbor_details[key] = True
            four_byte_as = neighbor_details.pop('4byte_as', 0)
            # Junos reports addresses as "ip+port"; 179 is assumed when
            # no port is attached
            local_address = neighbor_details.pop('local_address', '')
            local_details = local_address.split('+')
            neighbor_details['local_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, local_details[0], local_details[0])
            if len(local_details) == 2:
                neighbor_details['local_port'] = int(local_details[1])
            else:
                neighbor_details['local_port'] = 179
            neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
            peer_address = neighbor_details.pop('peer_address', '')
            remote_details = peer_address.split('+')
            neighbor_details['remote_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, remote_details[0], remote_details[0])
            if len(remote_details) == 2:
                neighbor_details['remote_port'] = int(remote_details[1])
            else:
                neighbor_details['remote_port'] = 179
            neighbor_details['routing_table'] = instance_name
            neighbor_details['local_as'] = napalm_base.helpers.as_number(
                neighbor_details['local_as'])
            neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                neighbor_details['remote_as'])
            neighbors_rib = neighbor_details.pop('rib')
            neighbors_queue = neighbor_details.pop('queue')
            # sum the queued output messages across all queues
            messages_queued_out = 0
            for queue_entry in neighbors_queue.items():
                messages_queued_out += queue_entry[1][0][1]
            neighbor_details['messages_queued_out'] = messages_queued_out
            if instance_name not in bgp_neighbors.keys():
                bgp_neighbors[instance_name] = {}
            if remote_as not in bgp_neighbors[instance_name].keys():
                bgp_neighbors[instance_name][remote_as] = []
            neighbor_rib_stats = neighbors_rib.items()
            if not neighbor_rib_stats:
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                continue  # no RIBs available, pass default details
            # aggregate prefix counters across all RIBs of this neighbor
            neighbor_rib_details = {
                'active_prefix_count': 0,
                'received_prefix_count': 0,
                'accepted_prefix_count': 0,
                'suppressed_prefix_count': 0,
                'advertised_prefix_count': 0
            }
            for rib_entry in neighbor_rib_stats:
                for elem in rib_entry[1]:
                    if elem[1] is None:
                        neighbor_rib_details[elem[0]] += 0
                    else:
                        neighbor_rib_details[elem[0]] += elem[1]
            neighbor_details.update(neighbor_rib_details)
            bgp_neighbors[instance_name][remote_as].append(neighbor_details)

    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    # if old_junos:
    # one RPC per routing instance (see _bgp_iter_core docstring)
    instances = junos_views.junos_route_instance_table(self.device)
    for instance, instance_data in instances.get().items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                neighbor_address=str(neighbor_address)).items()
        _bgp_iter_core(neighbor_data, instance=instance)
    # else:
    #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
    #     _bgp_iter_core(neighbor_data)
    return bgp_neighbors
def get_arp_table(self):
    """Return the ARP table as a list of entry dictionaries.

    MAC and IP fields are normalised through the napalm helpers.
    """
    # NOTE: jnpr.junos.op.phyport.ArpTable exists, but we use a custom
    # view because we also need filters, grouping by VLAN ID, and the
    # hostname & TTE fields.
    table = junos_views.junos_arp_table(self.device)
    table.get()
    arp_table = []
    for _, fields in table.items():
        entry = dict(fields)
        entry['mac'] = napalm_base.helpers.mac(entry.get('mac'))
        entry['ip'] = napalm_base.helpers.ip(entry.get('ip'))
        arp_table.append(entry)
    return arp_table
def get_ntp_peers(self):
    """Return the NTP peers configured on the device.

    The result maps each normalised peer address to an empty dict,
    matching the napalm getter contract.
    """
    peers_table = junos_views.junos_ntp_peers_config_table(self.device)
    peers_table.get()
    result = {}
    for peer_address, _ in peers_table.items():
        result[napalm_base.helpers.ip(peer_address)] = {}
    return result
def get_ntp_servers(self):
    """Return the NTP servers configured on the device.

    The result maps each normalised server address to an empty dict,
    matching the napalm getter contract.
    """
    servers_table = junos_views.junos_ntp_servers_config_table(self.device)
    servers_table.get()
    result = {}
    for server_address, _ in servers_table.items():
        result[napalm_base.helpers.ip(server_address)] = {}
    return result
def get_ntp_stats(self):
    """Return NTP stats (associations).

    NTP associations have no structured XML RPC on JunOS, so the raw
    output of ``show ntp associations no-resolve`` is parsed instead.
    Lines that do not match the expected column layout are skipped.
    """
    ntp_stats = []
    # raw string avoids invalid-escape warnings on modern Python;
    # compiled once instead of re-compiling on every line
    assoc_line_rgx = re.compile(
        r'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
        r'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
        r'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
        r'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
        r'\s+([0-9\.]+)\s?$',
        re.I
    )
    ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
    # the first three lines are table headers — skip them
    for ntp_assoc_output_line in ntp_assoc_output.splitlines()[3:]:
        line_search = assoc_line_rgx.search(ntp_assoc_output_line)
        if not line_search:
            continue  # pattern not found
        line_groups = line_search.groups()
        try:
            ntp_stats.append({
                'remote': napalm_base.helpers.ip(line_groups[1]),
                'synchronized': (line_groups[0] == '*'),  # '*' marks the sync peer
                'referenceid': py23_compat.text_type(line_groups[2]),
                'stratum': int(line_groups[3]),
                'type': py23_compat.text_type(line_groups[4]),
                'when': py23_compat.text_type(line_groups[5]),
                'hostpoll': int(line_groups[6]),
                'reachability': int(line_groups[7]),
                'delay': float(line_groups[8]),
                'offset': float(line_groups[9]),
                'jitter': float(line_groups[10])
            })
        except Exception:
            continue  # jump to next line
    return ntp_stats
def get_interfaces_ip(self):
    """Return the configured IP addresses.

    Result shape: {interface: {'ipv4'|'ipv6': {address: {'prefix_length': int}}}}.
    Entries whose family cannot be mapped are skipped.
    """
    _FAMILY_VMAP_ = {
        'inet': u'ipv4',
        'inet6': u'ipv6'
        # can add more mappings
    }
    _FAMILY_MAX_PREFIXLEN = {
        'inet': 32,
        'inet6': 128
    }
    interfaces_ip = {}
    ip_table = junos_views.junos_ip_interfaces_table(self.device)
    ip_table.get()
    for ip_network, fields in ip_table.items():
        raw_address = ip_network.split('/')[0]
        address = napalm_base.helpers.convert(
            napalm_base.helpers.ip, raw_address, raw_address)
        try:
            details = dict(fields)
            family_raw = details.get('family')
            interface = py23_compat.text_type(details.get('interface'))
        except ValueError:
            continue
        # missing prefix length defaults to the family maximum
        prefix = napalm_base.helpers.convert(
            int, ip_network.split('/')[-1], _FAMILY_MAX_PREFIXLEN.get(family_raw))
        family = _FAMILY_VMAP_.get(family_raw)
        if not family or not interface:
            continue
        per_family = interfaces_ip.setdefault(interface, {}).setdefault(family, {})
        per_family.setdefault(address, {})[u'prefix_length'] = prefix
    return interfaces_ip
def get_mac_address_table(self):
    """Return the MAC address table as a list of entry dictionaries.

    The view used depends on the platform: EX/QFX switches (including
    L2NG-style ones) have their own tables; everything else uses the
    generic one. Flood entries (MAC reported as '*') are skipped.
    """
    defaults = {
        'mac': u'',
        'interface': u'',
        'vlan': 0,
        'static': False,
        'active': True,
        'moves': 0,
        'last_move': 0.0
    }
    if self.device.facts.get('personality', '') in ['SWITCH']:  # EX & QFX devices
        if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']:  # L2NG devices
            mac_table = junos_views.junos_mac_address_table_switch_l2ng(self.device)
        else:
            mac_table = junos_views.junos_mac_address_table_switch(self.device)
    else:
        mac_table = junos_views.junos_mac_address_table(self.device)
    mac_table.get()
    mac_address_table = []
    for _, fields in mac_table.items():
        entry = defaults.copy()
        entry.update(dict(fields))
        mac = entry.get('mac')
        if mac == '*':
            # JUNOS returns '*' for Type = Flood
            continue
        entry['mac'] = napalm_base.helpers.mac(mac)
        mac_address_table.append(entry)
    return mac_address_table
def get_route_to(self, destination='', protocol=''):
    """Return route details to a specific destination, learned from a certain protocol.

    :param destination: prefix or host to look up (must be a string).
    :param protocol: optional protocol filter; 'connected' is translated
        to the Junos name 'direct'.
    :return: dict mapping '<dest>/<prefixlen>' to a list of route dicts,
        each with common fields plus a 'protocol_attributes' sub-dict.
    :raises TypeError: if destination is not a string.
    :raises CommandTimeoutException: if the query is too broad and the RPC
        times out.
    :raises CommandErrorException: for unknown protocols or any other RPC
        failure.
    """
    routes = {}
    if not isinstance(destination, py23_compat.string_types):
        raise TypeError('Please specify a valid destination!')
    if protocol and isinstance(destination, py23_compat.string_types):
        protocol = protocol.lower()
    if protocol == 'connected':
        protocol = 'direct'  # this is how is called on JunOS
    _COMMON_PROTOCOL_FIELDS_ = [
        'destination',
        'prefix_length',
        'protocol',
        'current_active',
        'last_active',
        'age',
        'next_hop',
        'outgoing_interface',
        'selected_next_hop',
        'preference',
        'inactive_reason',
        'routing_table'
    ]  # identifies the list of fileds common for all protocols
    _BOOLEAN_FIELDS_ = [
        'current_active',
        'selected_next_hop',
        'last_active'
    ]  # fields expected to have boolean values
    # fields kept in 'protocol_attributes', per protocol
    _PROTOCOL_SPECIFIC_FIELDS_ = {
        'bgp': [
            'local_as',
            'remote_as',
            'as_path',
            'communities',
            'local_preference',
            'preference2',
            'remote_address',
            'metric',
            'metric2'
        ],
        'isis': [
            'level',
            'metric',
            'local_as'
        ]
    }
    routes_table = junos_views.junos_protocol_route_table(self.device)
    rt_kargs = {
        'destination': destination
    }
    if protocol and isinstance(destination, py23_compat.string_types):
        rt_kargs['protocol'] = protocol
    try:
        routes_table.get(**rt_kargs)
    except RpcTimeoutError:
        # on devices with milions of routes
        # in case the destination is too generic (e.g.: 10/8)
        # will take very very long to determine all routes and
        # moreover will return a huge list
        raise CommandTimeoutException(
            'Too many routes returned! Please try with a longer prefix or a specific protocol!'
        )
    except RpcError as rpce:
        if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
            raise CommandErrorException(
                'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
        raise CommandErrorException(rpce)
    except Exception as err:
        raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))
    routes_items = routes_table.items()
    for route in routes_items:
        d = {}
        # next_hop = route[0]
        d = {elem[0]: elem[1] for elem in route[1]}
        # rebuild the '<dest>/<prefixlen>' key used in the result
        destination = napalm_base.helpers.ip(d.pop('destination', ''))
        prefix_length = d.pop('prefix_length', 32)
        destination = '{d}/{p}'.format(
            d=destination,
            p=prefix_length
        )
        # unreported boolean fields default to False
        d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
        as_path = d.get('as_path')
        if as_path is not None:
            # strip the 'AS path:' label and origin markers
            d['as_path'] = as_path.split(' I ')[0]\
                .replace('AS path:', '')\
                .replace('I', '')\
                .strip()
            # to be sure that contains only AS Numbers
        if d.get('inactive_reason') is None:
            d['inactive_reason'] = u''
        route_protocol = d.get('protocol').lower()
        if protocol and protocol != route_protocol:
            continue
        communities = d.get('communities')
        if communities is not None and type(communities) is not list:
            # a single community comes back as a scalar; normalise to list
            d['communities'] = [communities]
        d_keys = list(d.keys())
        # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
        all_protocol_attributes = {
            key: d.pop(key)
            for key in d_keys
            if key not in _COMMON_PROTOCOL_FIELDS_
        }
        protocol_attributes = {
            key: value for key, value in all_protocol_attributes.items()
            if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
        }
        d['protocol_attributes'] = protocol_attributes
        if destination not in routes.keys():
            routes[destination] = []
        routes[destination].append(d)
    return routes
def get_snmp_information(self):
    """Return the SNMP configuration, including the community table.

    Community 'mode' values are translated through
    ``C.SNMP_AUTHORIZATION_MODE_MAP``.
    """
    snmp_config = junos_views.junos_snmp_config_table(self.device)
    snmp_config.get()
    snmp_items = snmp_config.items()
    if not snmp_items:
        return {}
    snmp_information = {}
    for field, value in snmp_items[0][1]:
        snmp_information[py23_compat.text_type(field)] = value if value else ''
    snmp_information['community'] = {}
    communities_table = snmp_information.pop('communities_table')
    if not communities_table:
        return snmp_information
    for community_name, community_fields in communities_table.items():
        details = {'acl': ''}
        for field, value in community_fields:
            if field == 'mode':
                value = C.SNMP_AUTHORIZATION_MODE_MAP.get(value)
            details[py23_compat.text_type(field)] = py23_compat.text_type(value)
        snmp_information['community'][py23_compat.text_type(community_name)] = details
    return snmp_information
def get_probes_config(self):
    """Return the configuration of the RPM probes.

    Result shape: {probe_name: {test_name: {probe_type, target, source,
    probe_count, test_interval}}}.
    """
    probes = {}
    probes_table = junos_views.junos_rpm_probes_config_table(self.device)
    probes_table.get()
    for raw_test_name, fields in probes_table.items():
        test_name = py23_compat.text_type(raw_test_name)
        details = dict(fields)
        probe_name = napalm_base.helpers.convert(
            py23_compat.text_type, details.pop('probe_name'))
        probes.setdefault(probe_name, {})[test_name] = {
            'probe_type': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('probe_type', '')),
            'target': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('target', '')),
            'source': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('source_address', '')),
            'probe_count': napalm_base.helpers.convert(
                int, details.pop('probe_count', '0')),
            'test_interval': napalm_base.helpers.convert(
                int, details.pop('test_interval', '0'))
        }
    return probes
def get_probes_results(self):
    """Return the results of the RPM probes.

    Float timing values are converted from microseconds to milliseconds.
    """
    probes_results = {}
    results_table = junos_views.junos_rpm_probes_results_table(self.device)
    results_table.get()
    for raw_probe_name, fields in results_table.items():
        probe_name = py23_compat.text_type(raw_probe_name)
        test_results = dict(fields)
        test_results['last_test_loss'] = napalm_base.helpers.convert(
            int, test_results.pop('last_test_loss'), 0)
        for param_name, param_value in test_results.items():
            if isinstance(param_value, float):
                # convert from useconds to mseconds
                test_results[param_name] = param_value * 1e-3
        test_name = test_results.pop('test_name', '')
        if test_results.get('source', u'') is None:
            # normalise a missing source to an empty string
            test_results['source'] = u''
        probes_results.setdefault(probe_name, {})[test_name] = test_results
    return probes_results
def traceroute(self,
               destination,
               source=C.TRACEROUTE_SOURCE,
               ttl=C.TRACEROUTE_TTL,
               timeout=C.TRACEROUTE_TIMEOUT,
               vrf=C.TRACEROUTE_VRF):
    """Execute traceroute and return results.

    Builds a CLI `traceroute` command and sends it as a raw NETCONF RPC
    (the structured RPC does not work properly here), then parses the
    XML reply into {'success': {ttl: {'probes': {...}}}} or
    {'error': message} on failure.
    """
    traceroute_result = {}
    # calling form RPC does not work properly :(
    # but defined junos_route_instance_table just in case
    # build the optional CLI arguments only for the parameters provided
    source_str = ''
    maxttl_str = ''
    wait_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        wait_str = ' wait {timeout}'.format(timeout=timeout)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
        destination=destination,
        source=source_str,
        maxttl=maxttl_str,
        wait=wait_str,
        vrf=vrf_str
    )
    traceroute_rpc = E('command', traceroute_command)
    rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    traceroute_results = rpc_reply.find('.//traceroute-results')
    traceroute_failure = napalm_base.helpers.find_txt(
        traceroute_results, 'traceroute-failure', '')
    error_message = napalm_base.helpers.find_txt(
        traceroute_results, 'rpc-error/error-message', '')
    if traceroute_failure and error_message:
        return {'error': '{}: {}'.format(traceroute_failure, error_message)}
    traceroute_result['success'] = {}
    for hop in traceroute_results.findall('hop'):
        ttl_value = napalm_base.helpers.convert(
            int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
        if ttl_value not in traceroute_result['success']:
            traceroute_result['success'][ttl_value] = {'probes': {}}
        for probe in hop.findall('probe-result'):
            probe_index = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
            host_name = py23_compat.text_type(
                napalm_base.helpers.find_txt(probe, 'host-name', '*'))
            rtt = napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3  # ms
            traceroute_result['success'][ttl_value]['probes'][probe_index] = {
                'ip_address': ip_address,
                'host_name': host_name,
                'rtt': rtt
            }
    return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
         timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
    """Execute ping on the device and return the parsed results.

    Builds a CLI ``ping`` command, sends it as a raw NETCONF RPC and
    parses the XML reply. Returns {'success': {...}} with probe counts,
    packet loss and rtt statistics (converted from microseconds to
    milliseconds), or {'error': message} on failure / total loss.
    """
    ping_dict = {}
    # build the optional CLI arguments only for the parameters provided
    source_str = ''
    maxttl_str = ''
    timeout_str = ''
    size_str = ''
    count_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        timeout_str = ' wait {timeout}'.format(timeout=timeout)
    if size:
        size_str = ' size {size}'.format(size=size)
    if count:
        count_str = ' count {count}'.format(count=count)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
        destination=destination,
        source=source_str,
        ttl=maxttl_str,
        timeout=timeout_str,
        size=size_str,
        count=count_str,
        vrf=vrf_str
    )
    ping_rpc = E('command', ping_command)
    rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    probe_summary = rpc_reply.find('.//probe-results-summary')
    if probe_summary is None:
        rpc_error = rpc_reply.find('.//rpc-error')
        return {'error': '{}'.format(
            napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
    packet_loss = napalm_base.helpers.convert(
        int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
    # rtt values are valid only if a we get an ICMP reply
    # BUGFIX: use value equality, not identity (`is not 100` relied on
    # CPython's small-int caching and is a SyntaxWarning on modern Python)
    if packet_loss != 100:
        ping_dict['success'] = {}
        ping_dict['success']['probes_sent'] = int(
            probe_summary.findtext("probes-sent"))
        ping_dict['success']['packet_loss'] = packet_loss
        ping_dict['success'].update({
            'rtt_min': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
            'rtt_max': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
            'rtt_avg': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-average'), -1) * 1e-3), 3),
            'rtt_stddev': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
        })
        tmp = rpc_reply.find('.//ping-results')
        results_array = []
        for probe_result in tmp.findall('probe-result'):
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip,
                napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
            rtt = round(
                (napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(
                        probe_result, 'rtt'), -1) * 1e-3), 3)
            results_array.append({'ip_address': ip_address,
                                  'rtt': rtt})
        ping_dict['success'].update({'results': results_array})
    else:
        return {'error': 'Packet loss {}'.format(packet_loss)}
    return ping_dict
def get_users(self):
    """Return the configuration of the local users.

    Maps each Junos login class to a Cisco-style privilege level and
    collects any configured SSH public keys.

    :returns: dict keyed by username with ``level`` (int), ``password``
        (hashed secret as configured) and ``sshkeys`` (list of key strings).
    """
    users = {}
    # Junos login classes translated to Cisco-like privilege levels;
    # unknown classes fall back to 0 (see .get() below).
    _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = {
        'super-user': 15,
        'superuser': 15,
        'operator': 5,
        'read-only': 1,
        'unauthorized': 0
    }
    _DEFAULT_USER_DETAILS = {
        'level': 0,
        'password': '',
        'sshkeys': []
    }
    users_table = junos_views.junos_users_table(self.device)
    users_table.get()
    users_items = users_table.items()
    for user_entry in users_items:
        username = user_entry[0]
        # Start from a copy so the shared default dict is never mutated.
        user_details = _DEFAULT_USER_DETAILS.copy()
        # Only keep fields the device actually returned (truthy values).
        user_details.update({
            d[0]: d[1] for d in user_entry[1] if d[1]
        })
        user_class = user_details.pop('class', '')
        user_details = {
            key: py23_compat.text_type(user_details[key])
            for key in user_details.keys()
        }
        level = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0)
        user_details.update({
            'level': level
        })
        # Collapse the per-algorithm key fields into a single list.
        user_details['sshkeys'] = [
            user_details.pop(key)
            for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
            if user_details.get(key, '')
        ]
        users[username] = user_details
    return users
def get_optics(self):
    """Return optics (transceiver DOM) information per interface and lane.

    Combines the generic optics table (10G, one lane) with the 40G/100G
    table (multiple lanes per interface). Missing power/bias readings are
    reported as 0.0; avg/min/max are not provided by the device.
    """
    optics_table = junos_views.junos_intf_optics_table(self.device)
    optics_table.get()
    optics_items = optics_table.items()
    # optics_items has no lane information, so we need to re-format data
    # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
    # but the information for 40G/100G is incorrect at this point
    # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
    optics_items_with_lane = []
    for intf_optic_item in optics_items:
        temp_list = list(intf_optic_item)
        temp_list.insert(1, u"0")
        new_intf_optic_item = tuple(temp_list)
        optics_items_with_lane.append(new_intf_optic_item)
    # Now optics_items_with_lane has all optics with lane 0 included
    # Example: ('xe-0/0/0', u'0', [ optical_values ])
    # Get optical information for 40G/100G optics
    optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
    optics_table40G.get()
    optics_40Gitems = optics_table40G.items()
    # Re-format data as before inserting lane value
    new_optics_40Gitems = []
    for item in optics_40Gitems:
        lane = item[0]
        iface = item[1].pop(0)
        new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
    # New_optics_40Gitems contains 40G/100G optics only:
    # ('et-0/0/49', u'0', [ optical_values ]),
    # ('et-0/0/49', u'1', [ optical_values ]),
    # ('et-0/0/49', u'2', [ optical_values ])
    # Remove 40G/100G optics entries with wrong information returned
    # from junos_intf_optics_table()
    iface_40G = [item[0] for item in new_optics_40Gitems]
    for intf_optic_item in optics_items_with_lane:
        iface_name = intf_optic_item[0]
        if iface_name not in iface_40G:
            new_optics_40Gitems.append(intf_optic_item)
    # New_optics_40Gitems contains all optics 10G/40G/100G with the lane
    optics_detail = {}
    for intf_optic_item in new_optics_40Gitems:
        lane = intf_optic_item[1]
        interface_name = py23_compat.text_type(intf_optic_item[0])
        optics = dict(intf_optic_item[2])
        if interface_name not in optics_detail:
            optics_detail[interface_name] = {}
            optics_detail[interface_name]['physical_channels'] = {}
            optics_detail[interface_name]['physical_channels']['channel'] = []
        # Defaulting avg, min, max values to 0.0 since device does not
        # return these values
        intf_optics = {
            'index': int(lane),
            'state': {
                'input_power': {
                    'instant': (
                        float(optics['input_power'])
                        if optics['input_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'output_power': {
                    'instant': (
                        float(optics['output_power'])
                        if optics['output_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'laser_bias_current': {
                    'instant': (
                        float(optics['laser_bias_current'])
                        if optics['laser_bias_current'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                }
            }
        }
        optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
    return optics_detail
def get_config(self, retrieve='all'):
    """Return the device configuration in text format.

    :param retrieve: which datastore(s) to fetch: 'running', 'candidate'
        or 'all'. Junos has no separate startup config, so 'startup'
        is always returned empty.
    :returns: dict with 'startup', 'running' and 'candidate' keys.
    """
    fetched = {
        'startup': '',
        'running': '',
        'candidate': ''
    }
    rpc_options = {
        'format': 'text',
        'database': 'candidate'
    }
    if retrieve in ('candidate', 'all'):
        reply = self.device.rpc.get_config(filter_xml=None, options=rpc_options)
        fetched['candidate'] = py23_compat.text_type(reply.text)
    if retrieve in ('running', 'all'):
        # The committed database is the Junos equivalent of "running".
        rpc_options['database'] = 'committed'
        reply = self.device.rpc.get_config(filter_xml=None, options=rpc_options)
        fetched['running'] = py23_compat.text_type(reply.text)
    return fetched
def get_network_instances(self, name=''):
    """Return routing instances (VRFs etc.), optionally filtered by *name*.

    Interfaces not bound to any configured instance are attributed to a
    synthetic 'default' instance.
    """
    network_instances = {}
    ri_table = junos_views.junos_nw_instances_table(self.device)
    ri_table.get()
    ri_entries = ri_table.items()
    vrf_interfaces = []
    for ri_entry in ri_entries:
        ri_name = py23_compat.text_type(ri_entry[0])
        ri_details = {
            d[0]: d[1] for d in ri_entry[1]
        }
        ri_type = ri_details['instance_type']
        if ri_type is None:
            # Junos returns no type for the master/default instance.
            ri_type = 'default'
        ri_rd = ri_details['route_distinguisher']
        ri_interfaces = ri_details['interfaces']
        if not isinstance(ri_interfaces, list):
            # A single bound interface is returned as a scalar; normalise.
            ri_interfaces = [ri_interfaces]
        network_instances[ri_name] = {
            'name': ri_name,
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
            'state': {
                'route_distinguisher': ri_rd if ri_rd else ''
            },
            'interfaces': {
                'interface': {
                    intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                }
            }
        }
        vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
    # Everything not claimed by a VRF belongs to the default instance.
    all_interfaces = self.get_interfaces().keys()
    default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
    if 'default' not in network_instances:
        network_instances['default'] = {
            'name': 'default',
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
            'state': {
                'route_distinguisher': ''
            },
            'interfaces': {
                'interface': {
                    py23_compat.text_type(intrf_name): {}
                    for intrf_name in default_interfaces
                }
            }
        }
    if not name:
        return network_instances
    if name not in network_instances:
        return {}
    return {name: network_instances[name]}
|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver.get_bgp_neighbors | python | def get_bgp_neighbors(self):
# NOTE(review): this span is the *body* of ``get_bgp_neighbors`` — the
# ``def`` line and the final ``return bgp_tmp_dict`` are fused into the
# surrounding dataset-metadata lines and are not part of this fragment.
# It builds a dict keyed by routing-instance name ('global' for master),
# each entry holding 'router_id' and per-peer session details.
bgp_neighbor_data = {}
default_neighbor_details = {
    'local_as': 0,
    'remote_as': 0,
    'remote_id': '',
    'is_up': False,
    'is_enabled': False,
    'description': '',
    'uptime': 0,
    'address_family': {}
}
keys = default_neighbor_details.keys()
uptime_table = junos_views.junos_bgp_uptime_table(self.device)
bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
# Memoises uptime lookups so each routing instance is queried only once.
uptime_table_lookup = {}

def _get_uptime_table(instance):
    if instance not in uptime_table_lookup:
        uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
    return uptime_table_lookup[instance]

def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
    '''
    Make sure to execute a simple request whenever using
    junos > 13. This is a helper used to avoid code redundancy
    and reuse the function also when iterating through the list
    BGP neighbors under a specific routing instance,
    also when the device is capable to return the routing
    instance name at the BGP neighbor level.
    '''
    for bgp_neighbor in neighbor_data:
        # Junos suffixes the peer address with "+<port>"; strip it.
        peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
        neighbor_details = deepcopy(default_neighbor_details)
        neighbor_details.update(
            {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
        )
        if not instance:
            # not instance, means newer Junos version,
            # as we request everything in a single request
            peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
            instance = peer_fwd_rti
        else:
            # instance is explicitly requests,
            # thus it's an old Junos, so we retrieve the BGP neighbors
            # under a certain routing instance
            peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
        instance_name = 'global' if instance == 'master' else instance
        if instance_name not in bgp_neighbor_data:
            bgp_neighbor_data[instance_name] = {}
        if 'router_id' not in bgp_neighbor_data[instance_name]:
            # we only need to set this once
            bgp_neighbor_data[instance_name]['router_id'] = \
                py23_compat.text_type(neighbor_details.get('local_id', ''))
        peer = {
            key: self._parse_value(value)
            for key, value in neighbor_details.items()
            if key in keys
        }
        peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
        peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
        peer['address_family'] = self._parse_route_stats(neighbor_details)
        if 'peers' not in bgp_neighbor_data[instance_name]:
            bgp_neighbor_data[instance_name]['peers'] = {}
        bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
        if not uptime_table_items:
            uptime_table_items = _get_uptime_table(instance)
        for neighbor, uptime in uptime_table_items:
            if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
            bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]

# Commenting out the following sections, till Junos
# will provide a way to identify the routing instance name
# from the details of the BGP neighbor
# currently, there are Junos 15 version having a field called `peer_fwd_rti`
# but unfortunately, this is not consistent.
# Junos 17 might have this fixed, but this needs to be revisited later.
# In the definition below, `old_junos` means a version that does not provide
# the forwarding RTI information.
#
# old_junos = napalm_base.helpers.convert(
#     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15

# if old_junos:
instances = junos_views.junos_route_instance_table(self.device).get()
for instance, instance_data in instances.items():
    if instance.startswith('__'):
        # junos internal instances
        continue
    bgp_neighbor_data[instance] = {'peers': {}}
    instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
    uptime_table_items = uptime_table.get(instance=instance).items()
    _get_bgp_neighbors_core(instance_neighbors,
                            instance=instance,
                            uptime_table_items=uptime_table_items)
# If the OS provides the `peer_fwd_rti` or any way to identify the
# rotuing instance name (see above), the performances of this getter
# can be significantly improved, as we won't execute one request
# for each an every RT.
# However, this improvement would only be beneficial for multi-VRF envs.
#
# else:
#     instance_neighbors = bgp_neighbors_table.get().items()
#     _get_bgp_neighbors_core(instance_neighbors)
# Drop instances with no peers before returning.
bgp_tmp_dict = {}
for k, v in bgp_neighbor_data.items():
    if bgp_neighbor_data[k]['peers']:
        bgp_tmp_dict[k] = v
return bgp_tmp_dict | Return BGP neighbors details. | train | https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L516-L628 | null | class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
    """
    Initialise JunOS driver.

    :param hostname: device hostname or IP address.
    :param username: SSH/NETCONF username.
    :param password: password (or key passphrase when ``key_file`` is set).
    :param timeout: RPC timeout in seconds.
    :param optional_args: driver-specific options:

    * config_lock (True/False): lock configuration DB after the connection is established.
    * port (int): custom port
    * key_file (string): SSH key file path
    * keepalive (int): Keepalive interval
    * ignore_warning (boolean): not generate warning exceptions
    """
    self.hostname = hostname
    self.username = username
    self.password = password
    self.timeout = timeout
    # Toggled by load_replace_candidate()/load_merge_candidate() so that
    # load errors raise the matching exception type.
    self.config_replace = False
    # True while this session holds the configuration DB lock.
    self.locked = False
    # Get optional arguments
    if optional_args is None:
        optional_args = {}
    self.config_lock = optional_args.get('config_lock', False)
    self.port = optional_args.get('port', 22)
    self.key_file = optional_args.get('key_file', None)
    self.keepalive = optional_args.get('keepalive', 30)
    self.ssh_config_file = optional_args.get('ssh_config_file', None)
    self.ignore_warning = optional_args.get('ignore_warning', False)
    if self.key_file:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             ssh_private_key_file=self.key_file,
                             ssh_config=self.ssh_config_file,
                             port=self.port)
    else:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             port=self.port,
                             ssh_config=self.ssh_config_file)
    self.profile = ["junos"]
def open(self):
    """Open the connection with the device.

    :raises ConnectionException: when the NETCONF session cannot be
        established within the connect timeout.
    """
    try:
        self.device.open()
    except ConnectTimeoutError as cte:
        raise ConnectionException(cte.message)
    self.device.timeout = self.timeout
    # SSH-level keepalives prevent idle NETCONF sessions being dropped.
    self.device._conn._session.transport.set_keepalive(self.keepalive)
    if hasattr(self.device, "cu"):
        # make sure to remove the cu attr from previous session
        # ValueError: requested attribute name cu already exists
        del self.device.cu
    self.device.bind(cu=Config)
    if self.config_lock:
        self._lock()
def close(self):
    """Tear down the NETCONF session, releasing the config lock first if held."""
    if self.config_lock:
        # Connection-wide lock was taken at open() time; give it back.
        self._unlock()
    self.device.close()
def _lock(self):
"""Lock the config DB."""
if not self.locked:
self.device.cu.lock()
self.locked = True
def _unlock(self):
"""Unlock the config DB."""
if self.locked:
self.device.cu.unlock()
self.locked = False
def _rpc(self, get, child=None, **kwargs):
    """
    This allows you to construct an arbitrary RPC call to retreive common stuff. For example:
    Configuration: get: "<get-configuration/>"
    Interface information: get: "<get-interface-information/>"
    A particular interfacece information:
        get: "<get-interface-information/>"
        child: "<interface-name>ge-0/0/0</interface-name>"
    """
    request = etree.fromstring(get)
    # A truthy child element is appended under the top-level RPC tag.
    if child:
        request.append(etree.fromstring(child))
    reply = self.device.execute(request)
    return etree.tostring(reply)
def is_alive(self):
    """Report whether both the SSH transport and the NETCONF session are up."""
    # evaluate the state of the underlying SSH connection
    # and also the NETCONF status from PyEZ
    ssh_transport = self.device._conn._session.transport
    return {
        'is_alive': ssh_transport.is_active() and self.device.connected
    }
@staticmethod
def _is_json_format(config):
try:
_ = json.loads(config) # noqa
except (TypeError, ValueError):
return False
return True
def _detect_config_format(self, config):
fmt = 'text'
set_action_matches = [
'set',
'activate',
'deactivate',
'annotate',
'copy',
'delete',
'insert',
'protect',
'rename',
'unprotect',
]
if config.strip().startswith('<'):
return 'xml'
elif config.strip().split(' ')[0] in set_action_matches:
return 'set'
elif self._is_json_format(config):
return 'json'
return fmt
def _load_candidate(self, filename, config, overwrite):
    """Load a configuration into the candidate DB.

    :param filename: path to a config file; takes precedence over *config*.
    :param config: configuration string (text/set/XML/JSON), auto-detected.
    :param overwrite: when True the load replaces the candidate instead of
        merging into it.
    :raises ReplaceConfigException: on load error during a replace operation.
    :raises MergeConfigException: on load error during a merge operation.
    """
    if filename is None:
        configuration = config
    else:
        with open(filename) as f:
            configuration = f.read()
    if not self.config_lock:
        # if not locked during connection time
        # will try to lock it if not already aquired
        self._lock()
        # and the device will be locked till first commit/rollback
    try:
        fmt = self._detect_config_format(configuration)
        if fmt == "xml":
            # PyEZ expects an lxml element for XML-format loads.
            configuration = etree.XML(configuration)
        self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                            ignore_warning=self.ignore_warning)
    except ConfigLoadError as e:
        # self.config_replace reflects which public loader was called.
        if self.config_replace:
            raise ReplaceConfigException(e.errs)
        else:
            raise MergeConfigException(e.errs)
def load_replace_candidate(self, filename=None, config=None):
    """Load a candidate configuration that fully replaces the current one."""
    self.config_replace = True
    self._load_candidate(filename, config, True)
def load_merge_candidate(self, filename=None, config=None):
    """Load a candidate configuration to be merged into the current one."""
    self.config_replace = False
    self._load_candidate(filename, config, False)
def compare_config(self):
    """Return the diff between running and candidate config ('' if none)."""
    pending_diff = self.device.cu.diff()
    # PyEZ returns None when the candidate is identical to the running config.
    return '' if pending_diff is None else pending_diff.strip()
def commit_config(self):
    """Commit the candidate configuration to the device."""
    self.device.cu.commit(ignore_warning=self.ignore_warning)
    if self.config_lock:
        # Connection-wide lock stays held for the session's lifetime.
        return
    self._unlock()
def discard_config(self):
    """Drop all uncommitted candidate changes (equivalent to ``rollback 0``)."""
    # Restore the candidate DB to the currently committed configuration.
    self.device.cu.rollback(rb_id=0)
    if self.config_lock:
        # Connection-wide lock stays held for the session's lifetime.
        return
    self._unlock()
def rollback(self):
    """Revert to the previously committed configuration and activate it."""
    # "rollback 1" loads the previous commit into the candidate DB;
    # committing it then makes it the active configuration again.
    self.device.cu.rollback(rb_id=1)
    self.commit_config()
def get_facts(self):
    """Return general device facts: vendor, model, serial, OS, uptime, interfaces."""
    device_facts = self.device.facts
    # PyEZ may report uptime as None before the first refresh; use -1 then.
    device_uptime = self.device.uptime or -1
    iface_table = junos_views.junos_iface_table(self.device)
    iface_table.get()
    return {
        'vendor': u'Juniper',
        'model': py23_compat.text_type(device_facts['model']),
        'serial_number': py23_compat.text_type(device_facts['serialnumber']),
        'os_version': py23_compat.text_type(device_facts['version']),
        'hostname': py23_compat.text_type(device_facts['hostname']),
        'fqdn': py23_compat.text_type(device_facts['fqdn']),
        'uptime': device_uptime,
        'interface_list': iface_table.keys()
    }
def get_interfaces(self):
    """Return interfaces details keyed by interface name.

    Speed is normalised to Mbps; -1 when the device does not report it.
    """
    result = {}
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    # convert all the tuples to our pre-defined dict structure
    for iface in interfaces.keys():
        result[iface] = {
            'is_up': interfaces[iface]['is_up'],
            'is_enabled': interfaces[iface]['is_enabled'],
            'description': (interfaces[iface]['description'] or u''),
            'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
            'mac_address': napalm_base.helpers.convert(
                napalm_base.helpers.mac,
                interfaces[iface]['mac_address'],
                py23_compat.text_type(interfaces[iface]['mac_address'])),
            'speed': -1
        }
        # result[iface]['last_flapped'] = float(result[iface]['last_flapped'])
        # Parse speeds such as "1000mbps" or "10Gbps"; keep -1 when absent
        # or unparseable.
        match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
        if match is None:
            continue
        speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
        if speed_value == -1:
            continue
        speed_unit = match.group(2)
        if speed_unit.lower() == 'gbps':
            # Normalise Gbps to Mbps.
            speed_value *= 1000
        result[iface]['speed'] = speed_value
    return result
def get_interfaces_counters(self):
    """Return per-interface traffic counters; missing values become -1."""
    counters_table = junos_views.junos_iface_counter_table(self.device)
    counters_table.get()
    result = {}
    for iface, counters in counters_table.items():
        result[iface] = {
            name: (value if value is not None else -1)
            for name, value in counters
        }
    return result
def get_environment(self):
    """Return environment details: power, fans, temperature, CPU and memory.

    Data is merged from several Junos tables; PEM (power-entry module)
    capacity/output figures override the -1.0 placeholders when the
    platform supports the PEM RPC.
    """
    environment = junos_views.junos_enviroment_table(self.device)
    routing_engine = junos_views.junos_routing_engine_table(self.device)
    temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
    power_supplies = junos_views.junos_pem_table(self.device)
    environment.get()
    routing_engine.get()
    temperature_thresholds.get()
    environment_data = {}
    current_class = None
    for sensor_object, object_data in environment.items():
        structured_object_data = {k: v for k, v in object_data}
        if structured_object_data['class']:
            # If current object has a 'class' defined, store it for use
            # on subsequent unlabeled lines.
            current_class = structured_object_data['class']
        else:
            # Juniper doesn't label the 2nd+ lines of a given class with a
            # class name. In that case, we use the most recent class seen.
            structured_object_data['class'] = current_class
        if structured_object_data['class'] == 'Power':
            # Create a dict for the 'power' key
            try:
                environment_data['power'][sensor_object] = {}
            except KeyError:
                environment_data['power'] = {}
                environment_data['power'][sensor_object] = {}
            # Placeholders; overridden from the PEM table below if available.
            environment_data['power'][sensor_object]['capacity'] = -1.0
            environment_data['power'][sensor_object]['output'] = -1.0
        if structured_object_data['class'] == 'Fans':
            # Create a dict for the 'fans' key
            try:
                environment_data['fans'][sensor_object] = {}
            except KeyError:
                environment_data['fans'] = {}
                environment_data['fans'][sensor_object] = {}
        status = structured_object_data['status']
        env_class = structured_object_data['class']
        if (status == 'OK' and env_class == 'Power'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['power'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Power'):
            environment_data['power'][sensor_object]['status'] = False
        elif (status == 'OK' and env_class == 'Fans'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['fans'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Fans'):
            environment_data['fans'][sensor_object]['status'] = False
        # NOTE(review): this inner loop iterates all threshold rows but
        # keys results by the *outer* loop's sensor_object and reads the
        # outer structured_object_data — so every temperature sensor is
        # compared against the last thresholds row only. Looks unintended;
        # confirm against the junos_temperature_thresholds view.
        for temperature_object, temperature_data in temperature_thresholds.items():
            structured_temperature_data = {k: v for k, v in temperature_data}
            if structured_object_data['class'] == 'Temp':
                # Create a dict for the 'temperature' key
                try:
                    environment_data['temperature'][sensor_object] = {}
                except KeyError:
                    environment_data['temperature'] = {}
                    environment_data['temperature'][sensor_object] = {}
                # Check we have a temperature field in this class (See #66)
                if structured_object_data['temperature']:
                    environment_data['temperature'][sensor_object]['temperature'] = \
                        float(structured_object_data['temperature'])
                # Set a default value (False) to the key is_critical and is_alert
                environment_data['temperature'][sensor_object]['is_alert'] = False
                environment_data['temperature'][sensor_object]['is_critical'] = False
                # Check if the working temperature is equal to or higher than alerting threshold
                temp = structured_object_data['temperature']
                if structured_temperature_data['red-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_critical'] = True
                    environment_data['temperature'][sensor_object]['is_alert'] = True
                elif structured_temperature_data['yellow-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_alert'] = True
    # Try to correct Power Supply information
    pem_table = dict()
    try:
        power_supplies.get()
    except RpcError:
        # Not all platforms have support for this
        pass
    else:
        # Format PEM information and correct capacity and output values
        for pem in power_supplies.items():
            pem_name = pem[0].replace("PEM", "Power Supply")
            pem_table[pem_name] = dict(pem[1])
            environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
            environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
    for routing_engine_object, routing_engine_data in routing_engine.items():
        structured_routing_engine_data = {k: v for k, v in routing_engine_data}
        # Create dicts for 'cpu' and 'memory'.
        try:
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        except KeyError:
            environment_data['cpu'] = {}
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        # Calculate the CPU usage by using the CPU idle value.
        environment_data['cpu'][routing_engine_object]['%usage'] = \
            100.0 - structured_routing_engine_data['cpu-idle']
        try:
            environment_data['memory']['available_ram'] = \
                int(structured_routing_engine_data['memory-dram-size'])
        except ValueError:
            # Some platforms return e.g. "2048 MB"; keep only the digits.
            environment_data['memory']['available_ram'] = \
                int(
                    ''.join(
                        i for i in structured_routing_engine_data['memory-dram-size']
                        if i.isdigit()
                    )
                )
        # Junos gives us RAM in %, so calculation has to be made.
        # Sadly, bacause of this, results are not 100% accurate to the truth.
        environment_data['memory']['used_ram'] = \
            int(round(environment_data['memory']['available_ram'] / 100.0 *
                      structured_routing_engine_data['memory-buffer-utilization']))
    return environment_data
@staticmethod
def _get_address_family(table):
"""
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
"""
address_family_mapping = {
'inet': 'ipv4',
'inet6': 'ipv6',
'inetflow': 'flow'
}
family = table.split('.')[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = family
return address_family
def _parse_route_stats(self, neighbor):
    """Build per-address-family prefix counters for one BGP neighbor.

    :param neighbor: dict of fields as returned by the BGP table view.
        NOTE: ``neighbor['sent_prefixes']`` may be mutated in place (list
        normalisation and ``pop(0)`` consumption below).
    :returns: dict keyed by family ('ipv4', 'ipv6', ...) with
        received/accepted/sent prefix counts; -1 when the session is down.
    """
    data = {
        'ipv4': {
            'received_prefixes': -1,
            'accepted_prefixes': -1,
            'sent_prefixes': -1
        },
        'ipv6': {
            'received_prefixes': -1,
            'accepted_prefixes': -1,
            'sent_prefixes': -1
        }
    }
    if not neighbor['is_up']:
        return data
    elif isinstance(neighbor['tables'], list):
        if isinstance(neighbor['sent_prefixes'], int):
            # We expect sent_prefixes to be a list, but sometimes it
            # is of type int. Therefore convert attribute to list
            neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
        for idx, table in enumerate(neighbor['tables']):
            family = self._get_address_family(table)
            data[family] = {}
            data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
            data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
            # Junos only reports a sent count for tables that are 'in sync';
            # consume the sent_prefixes list in table order.
            if 'in sync' in neighbor['send-state'][idx]:
                data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
            else:
                data[family]['sent_prefixes'] = 0
    else:
        # Single table: counters are scalars, not lists.
        family = self._get_address_family(neighbor['tables'])
        data[family] = {}
        data[family]['received_prefixes'] = neighbor['received_prefixes']
        data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
        data[family]['sent_prefixes'] = neighbor['sent_prefixes']
    return data
@staticmethod
def _parse_value(value):
    """Normalise a table value: None → '', strings → unicode, rest unchanged."""
    if value is None:
        return u''
    if isinstance(value, py23_compat.string_types):
        return py23_compat.text_type(value)
    return value
def get_lldp_neighbors(self):
    """Return LLDP neighbors keyed by local interface name."""
    lldp_table = junos_views.junos_lldp_table(self.device)
    try:
        lldp_table.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    neighbors = {}
    for local_iface, details in lldp_table.items():
        neighbors.setdefault(local_iface, []).append(
            {field: py23_compat.text_type(value) for field, value in details}
        )
    return neighbors
def get_lldp_neighbors_detail(self, interface=''):
    """Detailed view of the LLDP neighbors.

    NOTE(review): the ``interface`` parameter is immediately shadowed by
    the ``for interface in interfaces`` loop below, so the per-interface
    filter is effectively ignored — confirm whether filtering was intended.
    """
    lldp_neighbors = {}
    lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
    try:
        lldp_table.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    interfaces = lldp_table.get().keys()
    # get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
    # and SRX Series is get-lldp-interface-neighbors-information,
    # and rpc for M, MX, and T Series is get-lldp-interface-neighbors
    # ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
    # ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
    lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
    if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
        lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
    for interface in interfaces:
        # Argument name differs between the two RPC flavours.
        if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
            lldp_table.get(interface_name=interface)
        else:
            lldp_table.get(interface_device=interface)
        for item in lldp_table:
            if interface not in lldp_neighbors.keys():
                lldp_neighbors[interface] = []
            lldp_neighbors[interface].append({
                'parent_interface': item.parent_interface,
                'remote_port': item.remote_port,
                'remote_chassis_id': napalm_base.helpers.convert(
                    napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
                'remote_port_description': napalm_base.helpers.convert(
                    py23_compat.text_type, item.remote_port_description),
                'remote_system_name': item.remote_system_name,
                'remote_system_description': item.remote_system_description,
                'remote_system_capab': item.remote_system_capab,
                'remote_system_enable_capab': item.remote_system_enable_capab
            })
    return lldp_neighbors
def cli(self, commands):
    """Execute raw CLI commands and returns their output.

    :param commands: list of CLI command strings; pipe modifiers
        (match/except/last/trim/count/find) are emulated locally via the
        helpers below, since some outputs cannot be piped on-box.
    :returns: dict mapping each original command to its (unicode) output.
    :raises TypeError: when *commands* is not a list.
    """
    cli_output = {}

    def _count(txt, none):  # Second arg for consistency only. noqa
        '''
        Return the exact output, as Junos displays
        e.g.:
        > show system processes extensive | match root | count
        Count: 113 lines
        '''
        count = len(txt.splitlines())
        return 'Count: {count} lines'.format(count=count)

    def _trim(txt, length):
        '''
        Trim specified number of columns from start of line.
        '''
        try:
            newlines = []
            for line in txt.splitlines():
                newlines.append(line[int(length):])
            return '\n'.join(newlines)
        except ValueError:
            # Non-numeric length: return the text untouched.
            return txt

    def _except(txt, pattern):
        '''
        Show only text that does not match a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        unmatched = [
            line for line in txt.splitlines()
            if not re.search(rgx, line, re.I)
        ]
        return '\n'.join(unmatched)

    def _last(txt, length):
        '''
        Display end of output only.
        '''
        try:
            return '\n'.join(
                txt.splitlines()[(-1)*int(length):]
            )
        except ValueError:
            return txt

    def _match(txt, pattern):
        '''
        Show only text that matches a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        matched = [
            line for line in txt.splitlines()
            if re.search(rgx, line, re.I)
        ]
        return '\n'.join(matched)

    def _find(txt, pattern):
        '''
        Search for first occurrence of pattern.
        '''
        rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
        match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
        if match:
            return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
        else:
            return '\nPattern not found'

    def _process_pipe(cmd, txt):
        '''
        Process CLI output from Juniper device that
        doesn't allow piping the output.
        '''
        # NOTE(review): when the device already returned output, it is
        # passed through untouched — local pipe emulation only applies to
        # a None result. Confirm this matches the on-box pipe behaviour.
        if txt is not None:
            return txt
        _OF_MAP = OrderedDict()
        _OF_MAP['except'] = _except
        _OF_MAP['match'] = _match
        _OF_MAP['last'] = _last
        _OF_MAP['trim'] = _trim
        _OF_MAP['count'] = _count
        _OF_MAP['find'] = _find
        # the operations order matter in this case!
        exploded_cmd = cmd.split('|')
        pipe_oper_args = {}
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            pipe_args = ''.join(exploded_pipe[1:2])
            # will not throw error when there's no arg
            pipe_oper_args[pipe_oper] = pipe_args
        for oper in _OF_MAP.keys():
            # to make sure the operation sequence is correct
            if oper not in pipe_oper_args.keys():
                continue
            txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
        return txt

    if not isinstance(commands, list):
        raise TypeError('Please enter a valid list of commands!')
    _PIPE_BLACKLIST = ['save']
    # Preprocessing to avoid forbidden commands
    for command in commands:
        exploded_cmd = command.split('|')
        command_safe_parts = []
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            if pipe_oper in _PIPE_BLACKLIST:
                # Strip pipes that would write to the device filesystem.
                continue
            pipe_args = ''.join(exploded_pipe[1:2])
            safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                              args=pipe_args)
            command_safe_parts.append(safe_pipe)
        safe_command = exploded_cmd[0] if not command_safe_parts else\
            '{base} | {pipes}'.format(base=exploded_cmd[0],
                                      pipes=' | '.join(command_safe_parts))
        raw_txt = self.device.cli(safe_command, warning=False)
        # Keyed by the *original* command so callers can look up by input.
        cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
            _process_pipe(command, raw_txt))
    return cli_output
def get_bgp_config(self, group='', neighbor=''):
    """Return the BGP configuration.

    :param group: if set, only return the configuration of that BGP group.
    :param neighbor: if set (honoured only together with ``group``), only
        return the configuration of that neighbor.
    :return: dict keyed by group name; each group carries its options, a
        nested ``prefix_limit`` structure and a ``neighbors`` dict.
    """
    def update_dict(d, u):  # for deep dictionary update
        for k, v in u.items():
            if isinstance(d, collections.Mapping):
                if isinstance(v, collections.Mapping):
                    r = update_dict(d.get(k, {}), v)
                    d[k] = r
                else:
                    d[k] = u[k]
            else:
                d = {k: u[k]}
        return d

    def build_prefix_limit(**args):
        """
        Transform the elements of a dictionary into nested dictionaries.

        Example:
            {
                'inet_unicast_limit': 500,
                'inet_unicast_teardown_threshold': 95,
                'inet_unicast_teardown_timeout': 5
            }
        becomes:
            {
                'inet': {
                    'unicast': {
                        'limit': 500,
                        'teardown': {
                            'threshold': 95,
                            'timeout': 5
                        }
                    }
                }
            }
        """
        prefix_limit = {}
        for key, value in args.items():
            key_levels = key.split('_')
            length = len(key_levels)-1
            temp_dict = {
                key_levels[length]: value
            }
            for index in reversed(range(length)):
                level = key_levels[index]
                temp_dict = {level: temp_dict}
            update_dict(prefix_limit, temp_dict)
        return prefix_limit

    # Field name -> expected datatype; fields absent from the device reply
    # are filled with the datatype's default from _DATATYPE_DEFAULT_.
    _COMMON_FIELDS_DATATYPE_ = {
        'description': py23_compat.text_type,
        'local_address': py23_compat.text_type,
        'local_as': int,
        'remote_as': int,
        'import_policy': py23_compat.text_type,
        'export_policy': py23_compat.text_type,
        'inet_unicast_limit_prefix_limit': int,
        'inet_unicast_teardown_threshold_prefix_limit': int,
        'inet_unicast_teardown_timeout_prefix_limit': int,
        'inet_unicast_novalidate_prefix_limit': int,
        'inet_flow_limit_prefix_limit': int,
        'inet_flow_teardown_threshold_prefix_limit': int,
        'inet_flow_teardown_timeout_prefix_limit': int,
        'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
        'inet6_unicast_limit_prefix_limit': int,
        'inet6_unicast_teardown_threshold_prefix_limit': int,
        'inet6_unicast_teardown_timeout_prefix_limit': int,
        'inet6_unicast_novalidate_prefix_limit': int,
        'inet6_flow_limit_prefix_limit': int,
        'inet6_flow_teardown_threshold_prefix_limit': int,
        'inet6_flow_teardown_timeout_prefix_limit': int,
        'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
    }
    _PEER_FIELDS_DATATYPE_MAP_ = {
        'authentication_key': py23_compat.text_type,
        'route_reflector_client': bool,
        'nhs': bool
    }
    _PEER_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )
    _GROUP_FIELDS_DATATYPE_MAP_ = {
        'type': py23_compat.text_type,
        'apply_groups': list,
        'remove_private_as': bool,
        'multipath': bool,
        'multihop_ttl': int
    }
    _GROUP_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )
    _DATATYPE_DEFAULT_ = {
        py23_compat.text_type: '',
        int: 0,
        bool: False,
        list: []
    }

    bgp_config = {}
    if group:
        bgp = junos_views.junos_bgp_config_group_table(self.device)
        bgp.get(group=group)
    else:
        bgp = junos_views.junos_bgp_config_table(self.device)
        bgp.get()
        neighbor = ''  # if no group is set, no neighbor should be set either
    bgp_items = bgp.items()
    if neighbor:
        neighbor_ip = napalm_base.helpers.ip(neighbor)

    for bgp_group in bgp_items:
        bgp_group_name = bgp_group[0]
        bgp_group_details = bgp_group[1]
        # Seed the group with defaults for all non-prefix-limit fields.
        bgp_config[bgp_group_name] = {
            field: _DATATYPE_DEFAULT_.get(datatype)
            for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
            if '_prefix_limit' not in field
        }
        # First pass: plain group options (prefix-limit fields are handled
        # separately below so they can be nested via build_prefix_limit()).
        for elem in bgp_group_details:
            if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                continue
            datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
            default = _DATATYPE_DEFAULT_.get(datatype)
            key = elem[0]
            value = elem[1]
            if key in ['export_policy', 'import_policy']:
                if isinstance(value, list):
                    value = ' '.join(value)
            if key == 'local_address':
                value = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, value, value)
            if key == 'neighbors':
                bgp_group_peers = value  # processed after the group options
                continue
            bgp_config[bgp_group_name].update({
                key: napalm_base.helpers.convert(datatype, value, default)
            })
        # Second pass: collect the *_prefix_limit fields and nest them.
        prefix_limit_fields = {}
        for elem in bgp_group_details:
            if '_prefix_limit' in elem[0] and elem[1] is not None:
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                prefix_limit_fields.update({
                    elem[0].replace('_prefix_limit', ''):
                        napalm_base.helpers.convert(datatype, elem[1], default)
                })
        bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
        if 'multihop' in bgp_config[bgp_group_name].keys():
            # Delete 'multihop' key from the output
            del bgp_config[bgp_group_name]['multihop']
            if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                # Set ttl to default value 64
                bgp_config[bgp_group_name]['multihop_ttl'] = 64

        bgp_config[bgp_group_name]['neighbors'] = {}
        for bgp_group_neighbor in bgp_group_peers.items():
            bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
            if neighbor and bgp_peer_address != neighbor:
                continue  # if filters applied, jump over all other neighbors
            bgp_group_details = bgp_group_neighbor[1]
            bgp_peer_details = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                bgp_peer_details.update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
                bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['local_as'])
                bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['remote_as'])
                if key == 'cluster':
                    # presence of a cluster-id implies route reflection
                    bgp_peer_details['route_reflector_client'] = True
                    # we do not want cluster in the output
                    del bgp_peer_details['cluster']
            if 'cluster' in bgp_config[bgp_group_name].keys():
                bgp_peer_details['route_reflector_client'] = True
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
            if neighbor and bgp_peer_address == neighbor_ip:
                break  # found the desired neighbor
        if 'cluster' in bgp_config[bgp_group_name].keys():
            # we do not want cluster in the output
            del bgp_config[bgp_group_name]['cluster']
    return bgp_config
def get_bgp_neighbors_detail(self, neighbor_address=''):
    """Detailed view of the BGP neighbors operational data.

    :param neighbor_address: optionally restrict the output to one neighbor.
    :return: dict keyed by routing-instance name, then by remote AS number,
        each value being a list of per-neighbor detail dicts.
    """
    bgp_neighbors = {}
    # Template for a neighbor entry; every field has a type-appropriate
    # "unknown" default so missing XML elements do not produce KeyErrors.
    default_neighbor_details = {
        'up': False,
        'local_as': 0,
        'remote_as': 0,
        'router_id': u'',
        'local_address': u'',
        'routing_table': u'',
        'local_address_configured': False,
        'local_port': 0,
        'remote_address': u'',
        'remote_port': 0,
        'multihop': False,
        'multipath': False,
        'remove_private_as': False,
        'import_policy': u'',
        'export_policy': u'',
        'input_messages': -1,
        'output_messages': -1,
        'input_updates': -1,
        'output_updates': -1,
        'messages_queued_out': -1,
        'connection_state': u'',
        'previous_connection_state': u'',
        'last_event': u'',
        'suppress_4byte_as': False,
        'local_as_prepend': False,
        'holdtime': 0,
        'configured_holdtime': 0,
        'keepalive': 0,
        'configured_keepalive': 0,
        'active_prefix_count': -1,
        'received_prefix_count': -1,
        'accepted_prefix_count': -1,
        'suppressed_prefix_count': -1,
        'advertised_prefix_count': -1,
        'flap_count': 0
    }
    # Maps tokens from the space-separated <bgp-option> string to the
    # boolean flags exposed in the output.
    OPTION_KEY_MAP = {
        'RemovePrivateAS': 'remove_private_as',
        'Multipath': 'multipath',
        'Multihop': 'multihop',
        'AddressFamily': 'local_address_configured'
        # 'AuthKey' : 'authentication_key_set'
        # but other vendors do not specify if auth key is set
        # other options:
        # Preference, HoldTime, Ttl, LogUpDown, Refresh
    }

    def _bgp_iter_core(neighbor_data, instance=None):
        '''
        Iterate over a list of neighbors.

        For older junos, the routing instance is not specified inside the
        BGP neighbors XML, therefore we need to use a super sub-optimal structure
        as in get_bgp_neighbors: iterate through the list of network instances
        then execute one request for each and every routing instance.
        For newer junos, this is not necessary as the routing instance is available
        and we can get everything solve in a single request.
        '''
        for bgp_neighbor in neighbor_data:
            remote_as = int(bgp_neighbor[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # older junos: read the routing instance from the reply itself
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            options = neighbor_details.pop('options', '')
            if isinstance(options, str):
                options_list = options.split()
                for option in options_list:
                    key = OPTION_KEY_MAP.get(option)
                    if key is not None:
                        neighbor_details[key] = True
            four_byte_as = neighbor_details.pop('4byte_as', 0)
            # addresses come back as "ip+port"; split and normalise
            local_address = neighbor_details.pop('local_address', '')
            local_details = local_address.split('+')
            neighbor_details['local_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, local_details[0], local_details[0])
            if len(local_details) == 2:
                neighbor_details['local_port'] = int(local_details[1])
            else:
                neighbor_details['local_port'] = 179
            neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
            peer_address = neighbor_details.pop('peer_address', '')
            remote_details = peer_address.split('+')
            neighbor_details['remote_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, remote_details[0], remote_details[0])
            if len(remote_details) == 2:
                neighbor_details['remote_port'] = int(remote_details[1])
            else:
                neighbor_details['remote_port'] = 179
            neighbor_details['routing_table'] = instance_name
            neighbor_details['local_as'] = napalm_base.helpers.as_number(
                neighbor_details['local_as'])
            neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                neighbor_details['remote_as'])
            neighbors_rib = neighbor_details.pop('rib')
            neighbors_queue = neighbor_details.pop('queue')
            # sum the queued-out counters over all queues
            messages_queued_out = 0
            for queue_entry in neighbors_queue.items():
                messages_queued_out += queue_entry[1][0][1]
            neighbor_details['messages_queued_out'] = messages_queued_out
            if instance_name not in bgp_neighbors.keys():
                bgp_neighbors[instance_name] = {}
            if remote_as not in bgp_neighbors[instance_name].keys():
                bgp_neighbors[instance_name][remote_as] = []
            neighbor_rib_stats = neighbors_rib.items()
            if not neighbor_rib_stats:
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                continue  # no RIBs available, pass default details
            # aggregate the prefix counters across all RIBs of the neighbor
            neighbor_rib_details = {
                'active_prefix_count': 0,
                'received_prefix_count': 0,
                'accepted_prefix_count': 0,
                'suppressed_prefix_count': 0,
                'advertised_prefix_count': 0
            }
            for rib_entry in neighbor_rib_stats:
                for elem in rib_entry[1]:
                    if elem[1] is None:
                        neighbor_rib_details[elem[0]] += 0
                    else:
                        neighbor_rib_details[elem[0]] += elem[1]
            neighbor_details.update(neighbor_rib_details)
            bgp_neighbors[instance_name][remote_as].append(neighbor_details)

    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    # if old_junos:
    # One RPC per routing instance; skip Junos-internal instances ("__...").
    instances = junos_views.junos_route_instance_table(self.device)
    for instance, instance_data in instances.get().items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                neighbor_address=str(neighbor_address)).items()
        _bgp_iter_core(neighbor_data, instance=instance)
    # else:
    #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
    #     _bgp_iter_core(neighbor_data)
    return bgp_neighbors
def get_arp_table(self):
    """Return the ARP table as a list of entry dictionaries."""
    # The stock jnpr ArpTable view is not used because this driver also
    # needs filters, per-VLAN grouping, and the hostname & TTE fields.
    table_view = junos_views.junos_arp_table(self.device)
    table_view.get()
    entries = []
    for _, fields in table_view.items():
        entry = dict(fields)
        # normalise MAC and IP into the formats napalm expects
        entry['mac'] = napalm_base.helpers.mac(entry.get('mac'))
        entry['ip'] = napalm_base.helpers.ip(entry.get('ip'))
        entries.append(entry)
    return entries
def get_ntp_peers(self):
    """Return the NTP peers configured on the device."""
    peers_view = junos_views.junos_ntp_peers_config_table(self.device)
    peers_view.get()
    result = {}
    # each item is (peer-address, fields); only the normalised address matters
    for address, _ in peers_view.items():
        result[napalm_base.helpers.ip(address)] = {}
    return result
def get_ntp_servers(self):
    """Return the NTP servers configured on the device."""
    servers_view = junos_views.junos_ntp_servers_config_table(self.device)
    servers_view.get()
    result = {}
    # each item is (server-address, fields); only the normalised address matters
    for address, _ in servers_view.items():
        result[napalm_base.helpers.ip(address)] = {}
    return result
def get_ntp_stats(self):
    """Return NTP stats (associations).

    There is no structured XML RPC for NTP associations on Junos, so the
    raw output of ``show ntp associations no-resolve`` is parsed line by
    line with a regular expression.
    """
    ntp_stats = []
    # Raw string literals: the previous non-raw form relied on invalid
    # escape sequences such as '\s', which raise DeprecationWarning on
    # Python >= 3.6 and are slated to become syntax errors.
    REGEX = (
        r'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
        r'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
        r'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
        r'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
        r'\s+([0-9\.]+)\s?$'
    )
    ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
    ntp_assoc_output_lines = ntp_assoc_output.splitlines()
    for ntp_assoc_output_line in ntp_assoc_output_lines[3:]:  # skip the 3 header lines
        line_search = re.search(REGEX, ntp_assoc_output_line, re.I)
        if not line_search:
            continue  # pattern not found
        line_groups = line_search.groups()
        try:
            ntp_stats.append({
                'remote': napalm_base.helpers.ip(line_groups[1]),
                'synchronized': (line_groups[0] == '*'),
                'referenceid': py23_compat.text_type(line_groups[2]),
                'stratum': int(line_groups[3]),
                'type': py23_compat.text_type(line_groups[4]),
                'when': py23_compat.text_type(line_groups[5]),
                'hostpoll': int(line_groups[6]),
                'reachability': int(line_groups[7]),
                'delay': float(line_groups[8]),
                'offset': float(line_groups[9]),
                'jitter': float(line_groups[10])
            })
        except Exception:
            continue  # malformed line; jump to next one
    return ntp_stats
def get_interfaces_ip(self):
    """Return the configured IP addresses.

    :return: dict of interface -> family ('ipv4'/'ipv6') -> address ->
        {'prefix_length': int}.
    """
    interfaces_ip = {}
    interface_table = junos_views.junos_ip_interfaces_table(self.device)
    interface_table.get()
    interface_table_items = interface_table.items()
    # Junos family name -> napalm family name.
    _FAMILY_VMAP_ = {
        'inet': u'ipv4',
        'inet6': u'ipv6'
        # can add more mappings
    }
    # Fallback prefix length when the reply omits one.
    _FAMILY_MAX_PREFIXLEN = {
        'inet': 32,
        'inet6': 128
    }
    for interface_details in interface_table_items:
        # table key is the "address/prefix" string
        ip_network = interface_details[0]
        ip_address = ip_network.split('/')[0]
        address = napalm_base.helpers.convert(
            napalm_base.helpers.ip, ip_address, ip_address)
        try:
            interface_details_dict = dict(interface_details[1])
            family_raw = interface_details_dict.get('family')
            interface = py23_compat.text_type(interface_details_dict.get('interface'))
        except ValueError:
            continue
        prefix = napalm_base.helpers.convert(int,
                                             ip_network.split('/')[-1],
                                             _FAMILY_MAX_PREFIXLEN.get(family_raw))
        family = _FAMILY_VMAP_.get(family_raw)
        if not family or not interface:
            continue  # unsupported family or anonymous entry
        if interface not in interfaces_ip.keys():
            interfaces_ip[interface] = {}
        if family not in interfaces_ip[interface].keys():
            interfaces_ip[interface][family] = {}
        if address not in interfaces_ip[interface][family].keys():
            interfaces_ip[interface][family][address] = {}
        interfaces_ip[interface][family][address][u'prefix_length'] = prefix
    return interfaces_ip
def get_mac_address_table(self):
    """Return the MAC address table."""
    _DEFAULTS = {
        'mac': u'',
        'interface': u'',
        'vlan': 0,
        'static': False,
        'active': True,
        'moves': 0,
        'last_move': 0.0
    }
    # Pick the view matching the platform: EX & QFX switches use dedicated
    # RPCs, and L2NG-style switches a different one again.
    if self.device.facts.get('personality', '') in ['SWITCH']:
        if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']:
            table_view = junos_views.junos_mac_address_table_switch_l2ng(self.device)
        else:
            table_view = junos_views.junos_mac_address_table_switch(self.device)
    else:
        table_view = junos_views.junos_mac_address_table(self.device)
    table_view.get()
    mac_address_table = []
    for _, fields in table_view.items():
        entry = _DEFAULTS.copy()
        entry.update(dict(fields))
        raw_mac = entry.get('mac')
        # JUNOS returns '*' for Type = Flood
        if raw_mac == '*':
            continue
        entry['mac'] = napalm_base.helpers.mac(raw_mac)
        mac_address_table.append(entry)
    return mac_address_table
def get_route_to(self, destination='', protocol=''):
    """Return route details to a specific destination, learned from a certain protocol.

    :param destination: the destination prefix to look up.
    :param protocol: optionally restrict to routes learned via this protocol
        ('connected' is translated to Junos' 'direct').
    :return: dict of "destination/prefixlen" -> list of route-entry dicts,
        each carrying common fields plus a ``protocol_attributes`` sub-dict.
    :raises TypeError: when ``destination`` is not a string.
    :raises CommandTimeoutException: when the query matches too many routes.
    :raises CommandErrorException: for unknown protocols or other RPC errors.
    """
    routes = {}
    if not isinstance(destination, py23_compat.string_types):
        raise TypeError('Please specify a valid destination!')
    # destination is guaranteed to be a string past the guard above, so the
    # former repeated isinstance() checks were redundant.
    if protocol:
        protocol = protocol.lower()
        if protocol == 'connected':
            protocol = 'direct'  # this is how it is called on JunOS
    _COMMON_PROTOCOL_FIELDS_ = [
        'destination',
        'prefix_length',
        'protocol',
        'current_active',
        'last_active',
        'age',
        'next_hop',
        'outgoing_interface',
        'selected_next_hop',
        'preference',
        'inactive_reason',
        'routing_table'
    ]  # identifies the list of fields common for all protocols
    _BOOLEAN_FIELDS_ = [
        'current_active',
        'selected_next_hop',
        'last_active'
    ]  # fields expected to have boolean values
    _PROTOCOL_SPECIFIC_FIELDS_ = {
        'bgp': [
            'local_as',
            'remote_as',
            'as_path',
            'communities',
            'local_preference',
            'preference2',
            'remote_address',
            'metric',
            'metric2'
        ],
        'isis': [
            'level',
            'metric',
            'local_as'
        ]
    }
    routes_table = junos_views.junos_protocol_route_table(self.device)
    rt_kargs = {
        'destination': destination
    }
    if protocol:
        rt_kargs['protocol'] = protocol
    try:
        routes_table.get(**rt_kargs)
    except RpcTimeoutError:
        # on devices with millions of routes,
        # in case the destination is too generic (e.g.: 10/8)
        # it will take very long to determine all routes and
        # moreover will return a huge list
        raise CommandTimeoutException(
            'Too many routes returned! Please try with a longer prefix or a specific protocol!'
        )
    except RpcError as rpce:
        if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
            raise CommandErrorException(
                'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
        raise CommandErrorException(rpce)
    except Exception as err:
        raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))
    routes_items = routes_table.items()
    for route in routes_items:
        # flatten (field, value) pairs into a dict; the former extra
        # "d = {}" line was dead code and has been removed
        d = {elem[0]: elem[1] for elem in route[1]}
        destination = napalm_base.helpers.ip(d.pop('destination', ''))
        prefix_length = d.pop('prefix_length', 32)
        destination = '{d}/{p}'.format(
            d=destination,
            p=prefix_length
        )
        # fill missing boolean fields with False
        d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
        as_path = d.get('as_path')
        if as_path is not None:
            d['as_path'] = as_path.split(' I ')[0]\
                .replace('AS path:', '')\
                .replace('I', '')\
                .strip()
            # to be sure that contains only AS Numbers
        if d.get('inactive_reason') is None:
            d['inactive_reason'] = u''
        route_protocol = d.get('protocol').lower()
        if protocol and protocol != route_protocol:
            continue
        communities = d.get('communities')
        if communities is not None and not isinstance(communities, list):
            # a single community comes back as a scalar; normalise to a list
            d['communities'] = [communities]
        d_keys = list(d.keys())
        # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
        all_protocol_attributes = {
            key: d.pop(key)
            for key in d_keys
            if key not in _COMMON_PROTOCOL_FIELDS_
        }
        protocol_attributes = {
            key: value for key, value in all_protocol_attributes.items()
            if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
        }
        d['protocol_attributes'] = protocol_attributes
        if destination not in routes.keys():
            routes[destination] = []
        routes[destination].append(d)
    return routes
def get_snmp_information(self):
    """Return the SNMP configuration."""
    snmp_config = junos_views.junos_snmp_config_table(self.device)
    snmp_config.get()
    snmp_items = snmp_config.items()
    if not snmp_items:
        return {}
    # flatten the single top-level entry into name -> value pairs,
    # replacing empty values with ''
    snmp_information = {}
    for field, value in snmp_items[0][1]:
        snmp_information[py23_compat.text_type(field)] = value if value else ''
    snmp_information['community'] = {}
    communities_table = snmp_information.pop('communities_table')
    if not communities_table:
        return snmp_information
    for community_name, community_fields in communities_table.items():
        details = {'acl': ''}
        for field, value in community_fields:
            # the raw 'mode' string is mapped to napalm's authorization names
            if field == 'mode':
                value = C.SNMP_AUTHORIZATION_MODE_MAP.get(value)
            details[py23_compat.text_type(field)] = py23_compat.text_type(value)
        snmp_information['community'][py23_compat.text_type(community_name)] = details
    return snmp_information
def get_probes_config(self):
    """Return the configuration of the RPM probes."""
    probes = {}
    probes_view = junos_views.junos_rpm_probes_config_table(self.device)
    probes_view.get()
    for raw_test_name, fields in probes_view.items():
        test_name = py23_compat.text_type(raw_test_name)
        details = dict(fields)
        # normalise every field to the expected type, with safe defaults
        probe_name = napalm_base.helpers.convert(
            py23_compat.text_type, details.pop('probe_name'))
        test_config = {
            'probe_type': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('probe_type', '')),
            'target': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('target', '')),
            'source': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('source_address', '')),
            'probe_count': napalm_base.helpers.convert(
                int, details.pop('probe_count', '0')),
            'test_interval': napalm_base.helpers.convert(
                int, details.pop('test_interval', '0')),
        }
        probes.setdefault(probe_name, {})[test_name] = test_config
    return probes
def get_probes_results(self):
    """Return the results of the RPM probes."""
    probes_results = {}
    results_view = junos_views.junos_rpm_probes_results_table(self.device)
    results_view.get()
    for raw_probe_name, fields in results_view.items():
        probe_name = py23_compat.text_type(raw_probe_name)
        test_results = dict(fields)
        test_results['last_test_loss'] = napalm_base.helpers.convert(
            int, test_results.pop('last_test_loss'), 0)
        # RPM reports timing values in microseconds; convert to milliseconds
        for field in test_results:
            value = test_results[field]
            if isinstance(value, float):
                test_results[field] = value * 1e-3
        test_name = test_results.pop('test_name', '')
        # 'source' may be present with a None value; normalise it
        if test_results.get('source', u'') is None:
            test_results['source'] = u''
        probes_results.setdefault(probe_name, {})[test_name] = test_results
    return probes_results
def traceroute(self,
               destination,
               source=C.TRACEROUTE_SOURCE,
               ttl=C.TRACEROUTE_TTL,
               timeout=C.TRACEROUTE_TIMEOUT,
               vrf=C.TRACEROUTE_VRF):
    """Execute traceroute and return results.

    :param destination: host to trace towards.
    :param source: optional source address.
    :param ttl: optional maximum TTL.
    :param timeout: optional per-probe wait in seconds.
    :param vrf: optional routing-instance name.
    :return: {'success': {ttl: {'probes': {...}}}} or {'error': message}.
    """
    traceroute_result = {}
    # calling form RPC does not work properly :(
    # but defined junos_route_instance_table just in case
    # Build the CLI command from the optional arguments.
    source_str = ''
    maxttl_str = ''
    wait_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        wait_str = ' wait {timeout}'.format(timeout=timeout)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
        destination=destination,
        source=source_str,
        maxttl=maxttl_str,
        wait=wait_str,
        vrf=vrf_str
    )
    traceroute_rpc = E('command', traceroute_command)
    rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    traceroute_results = rpc_reply.find('.//traceroute-results')
    traceroute_failure = napalm_base.helpers.find_txt(
        traceroute_results, 'traceroute-failure', '')
    error_message = napalm_base.helpers.find_txt(
        traceroute_results, 'rpc-error/error-message', '')
    if traceroute_failure and error_message:
        return {'error': '{}: {}'.format(traceroute_failure, error_message)}
    traceroute_result['success'] = {}
    # One <hop> per TTL, each with one <probe-result> per probe.
    for hop in traceroute_results.findall('hop'):
        ttl_value = napalm_base.helpers.convert(
            int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
        if ttl_value not in traceroute_result['success']:
            traceroute_result['success'][ttl_value] = {'probes': {}}
        for probe in hop.findall('probe-result'):
            probe_index = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
            host_name = py23_compat.text_type(
                napalm_base.helpers.find_txt(probe, 'host-name', '*'))
            # rtt is reported in microseconds; convert to milliseconds
            rtt = napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3  # ms
            traceroute_result['success'][ttl_value]['probes'][probe_index] = {
                'ip_address': ip_address,
                'host_name': host_name,
                'rtt': rtt
            }
    return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
         timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
    """Execute ping on the device and return the statistics.

    :param destination: host to ping.
    :param source: optional source address.
    :param ttl: optional TTL.
    :param timeout: optional per-probe wait in seconds.
    :param size: optional payload size.
    :param count: optional number of probes.
    :param vrf: optional routing-instance name.
    :return: {'success': {...}} with rtt statistics and per-probe results,
        or {'error': message} on failure / total packet loss.
    """
    ping_dict = {}
    # Build the CLI command from the optional arguments.
    source_str = ''
    maxttl_str = ''
    timeout_str = ''
    size_str = ''
    count_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        timeout_str = ' wait {timeout}'.format(timeout=timeout)
    if size:
        size_str = ' size {size}'.format(size=size)
    if count:
        count_str = ' count {count}'.format(count=count)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
        destination=destination,
        source=source_str,
        ttl=maxttl_str,
        timeout=timeout_str,
        size=size_str,
        count=count_str,
        vrf=vrf_str
    )
    ping_rpc = E('command', ping_command)
    rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    probe_summary = rpc_reply.find('.//probe-results-summary')
    if probe_summary is None:
        rpc_error = rpc_reply.find('.//rpc-error')
        return {'error': '{}'.format(
            napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
    packet_loss = napalm_base.helpers.convert(
        int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
    # rtt values are valid only if we get an ICMP reply.
    # NOTE: fixed the former "is not 100" identity comparison — comparing
    # ints with "is" relies on CPython's small-int caching and raises
    # SyntaxWarning on Python >= 3.8; value equality is what's meant here.
    if packet_loss != 100:
        ping_dict['success'] = {}
        ping_dict['success']['probes_sent'] = int(
            probe_summary.findtext("probes-sent"))
        ping_dict['success']['packet_loss'] = packet_loss
        # rtt values come back in microseconds; convert to milliseconds
        ping_dict['success'].update({
            'rtt_min': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
            'rtt_max': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
            'rtt_avg': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-average'), -1) * 1e-3), 3),
            'rtt_stddev': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
        })
        tmp = rpc_reply.find('.//ping-results')
        results_array = []
        for probe_result in tmp.findall('probe-result'):
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip,
                napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
            rtt = round(
                (napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(
                        probe_result, 'rtt'), -1) * 1e-3), 3)
            results_array.append({'ip_address': ip_address,
                                  'rtt': rtt})
        ping_dict['success'].update({'results': results_array})
    else:
        return {'error': 'Packet loss {}'.format(packet_loss)}
    return ping_dict
def get_users(self):
    """Return the configuration of the users.

    :return: dict of username -> {'level': int, 'password': str,
        'sshkeys': list-of-str}.
    """
    users = {}
    # Map Junos login classes to Cisco-style privilege levels, since the
    # napalm model expresses access rights as a numeric level.
    _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = {
        'super-user': 15,
        'superuser': 15,
        'operator': 5,
        'read-only': 1,
        'unauthorized': 0
    }
    _DEFAULT_USER_DETAILS = {
        'level': 0,
        'password': '',
        'sshkeys': []
    }
    users_table = junos_views.junos_users_table(self.device)
    users_table.get()
    users_items = users_table.items()
    for user_entry in users_items:
        username = user_entry[0]
        user_details = _DEFAULT_USER_DETAILS.copy()
        # keep only the fields actually set on the device
        user_details.update({
            d[0]: d[1] for d in user_entry[1] if d[1]
        })
        user_class = user_details.pop('class', '')
        # stringify every remaining value (level is re-added as int below)
        user_details = {
            key: py23_compat.text_type(user_details[key])
            for key in user_details.keys()
        }
        level = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0)
        user_details.update({
            'level': level
        })
        # collapse the per-algorithm key fields into a single list
        user_details['sshkeys'] = [
            user_details.pop(key)
            for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
            if user_details.get(key, '')
        ]
        users[username] = user_details
    return users
def get_optics(self):
    """Return optics information.

    :return: dict of interface -> {'physical_channels': {'channel': [...]}}
        with per-lane input/output power and laser bias current.
    """
    optics_table = junos_views.junos_intf_optics_table(self.device)
    optics_table.get()
    optics_items = optics_table.items()
    # optics_items has no lane information, so we need to re-format data
    # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
    # but the information for 40G/100G is incorrect at this point
    # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
    optics_items_with_lane = []
    for intf_optic_item in optics_items:
        temp_list = list(intf_optic_item)
        temp_list.insert(1, u"0")
        new_intf_optic_item = tuple(temp_list)
        optics_items_with_lane.append(new_intf_optic_item)
    # Now optics_items_with_lane has all optics with lane 0 included
    # Example: ('xe-0/0/0', u'0', [ optical_values ])

    # Get optical information for 40G/100G optics
    optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
    optics_table40G.get()
    optics_40Gitems = optics_table40G.items()
    # Re-format data as before inserting lane value
    new_optics_40Gitems = []
    for item in optics_40Gitems:
        lane = item[0]
        iface = item[1].pop(0)
        new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
    # New_optics_40Gitems contains 40G/100G optics only:
    # ('et-0/0/49', u'0', [ optical_values ]),
    # ('et-0/0/49', u'1', [ optical_values ]),
    # ('et-0/0/49', u'2', [ optical_values ])

    # Remove 40G/100G optics entries with wrong information returned
    # from junos_intf_optics_table()
    iface_40G = [item[0] for item in new_optics_40Gitems]
    for intf_optic_item in optics_items_with_lane:
        iface_name = intf_optic_item[0]
        if iface_name not in iface_40G:
            new_optics_40Gitems.append(intf_optic_item)
    # New_optics_40Gitems contains all optics 10G/40G/100G with the lane

    optics_detail = {}
    for intf_optic_item in new_optics_40Gitems:
        lane = intf_optic_item[1]
        interface_name = py23_compat.text_type(intf_optic_item[0])
        optics = dict(intf_optic_item[2])
        if interface_name not in optics_detail:
            optics_detail[interface_name] = {}
            optics_detail[interface_name]['physical_channels'] = {}
            optics_detail[interface_name]['physical_channels']['channel'] = []
        # Defaulting avg, min, max values to 0.0 since device does not
        # return these values
        intf_optics = {
            'index': int(lane),
            'state': {
                'input_power': {
                    'instant': (
                        float(optics['input_power'])
                        if optics['input_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'output_power': {
                    'instant': (
                        float(optics['output_power'])
                        if optics['output_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'laser_bias_current': {
                    'instant': (
                        float(optics['laser_bias_current'])
                        if optics['laser_bias_current'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                }
            }
        }
        optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
    return optics_detail
def get_config(self, retrieve='all'):
    """Return the device configuration in text format.

    Junos has no startup configuration, so 'startup' is always empty.
    """
    result = {
        'startup': '',
        'running': '',
        'candidate': ''
    }
    get_options = {
        'format': 'text',
        'database': 'candidate'
    }
    if retrieve in ('candidate', 'all'):
        candidate = self.device.rpc.get_config(filter_xml=None, options=get_options)
        result['candidate'] = py23_compat.text_type(candidate.text)
    if retrieve in ('running', 'all'):
        # same RPC, but against the committed database
        get_options['database'] = 'committed'
        running = self.device.rpc.get_config(filter_xml=None, options=get_options)
        result['running'] = py23_compat.text_type(running.text)
    return result
def get_network_instances(self, name=''):
    """Return network instances (routing instances / VRFs) on the device.

    :param name: if set, return only the instance with this name
        (empty dict when it does not exist); otherwise return all.
    """
    network_instances = {}

    ri_table = junos_views.junos_nw_instances_table(self.device)
    ri_table.get()
    ri_entries = ri_table.items()

    # Interfaces already claimed by an explicit routing instance.
    vrf_interfaces = []

    for ri_entry in ri_entries:
        ri_name = py23_compat.text_type(ri_entry[0])
        # Each table entry is a list of (field, value) pairs.
        ri_details = {
            d[0]: d[1] for d in ri_entry[1]
        }
        ri_type = ri_details['instance_type']
        if ri_type is None:
            ri_type = 'default'
        ri_rd = ri_details['route_distinguisher']
        ri_interfaces = ri_details['interfaces']
        # A single bound interface comes back as a scalar; normalise to a list.
        if not isinstance(ri_interfaces, list):
            ri_interfaces = [ri_interfaces]
        network_instances[ri_name] = {
            'name': ri_name,
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
            'state': {
                'route_distinguisher': ri_rd if ri_rd else ''
            },
            'interfaces': {
                'interface': {
                    intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                }
            }
        }
        vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())

    # Interfaces not bound to any routing instance belong to 'default'.
    all_interfaces = self.get_interfaces().keys()
    default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
    if 'default' not in network_instances:
        network_instances['default'] = {
            'name': 'default',
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
            'state': {
                'route_distinguisher': ''
            },
            'interfaces': {
                'interface': {
                    py23_compat.text_type(intrf_name): {}
                    for intrf_name in default_interfaces
                }
            }
        }

    if not name:
        return network_instances
    if name not in network_instances:
        return {}
    return {name: network_instances[name]}
|
def get_lldp_neighbors(self):
    """Return LLDP neighbors, keyed by local interface name."""
    lldp = junos_views.junos_lldp_table(self.device)
    try:
        lldp.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}

    neighbors = {}
    for local_port, fields in lldp.items():
        entry = {field: py23_compat.text_type(value) for field, value in fields}
        neighbors.setdefault(local_port, []).append(entry)
    return neighbors
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
    """
    Initialise JunOS driver.

    Optional args:
        * config_lock (True/False): lock configuration DB after the connection is established.
        * port (int): custom port
        * key_file (string): SSH key file path
        * keepalive (int): Keepalive interval
        * ignore_warning (boolean): not generate warning exceptions
    """
    self.hostname = hostname
    self.username = username
    self.password = password
    self.timeout = timeout
    self.config_replace = False  # True while a replace (vs merge) candidate is loaded
    self.locked = False          # tracks whether we hold the configuration DB lock

    # Get optional arguments
    if optional_args is None:
        optional_args = {}

    self.config_lock = optional_args.get('config_lock', False)
    self.port = optional_args.get('port', 22)
    self.key_file = optional_args.get('key_file', None)
    self.keepalive = optional_args.get('keepalive', 30)
    self.ssh_config_file = optional_args.get('ssh_config_file', None)
    self.ignore_warning = optional_args.get('ignore_warning', False)

    # Only pass ssh_private_key_file when a key file was supplied —
    # presumably to avoid handing PyEZ an explicit None; TODO confirm.
    if self.key_file:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             ssh_private_key_file=self.key_file,
                             ssh_config=self.ssh_config_file,
                             port=self.port)
    else:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             port=self.port,
                             ssh_config=self.ssh_config_file)
    self.profile = ["junos"]
def open(self):
    """Open the NETCONF connection with the device.

    :raises ConnectionException: when the connection attempt times out.
    """
    try:
        self.device.open()
    except ConnectTimeoutError as cte:
        # `Exception.message` does not exist on Python 3; PyEZ's
        # ConnectError hierarchy exposes the text via its `msg` property.
        raise ConnectionException(cte.msg)
    self.device.timeout = self.timeout
    self.device._conn._session.transport.set_keepalive(self.keepalive)
    if hasattr(self.device, "cu"):
        # make sure to remove the cu attr from previous session
        # ValueError: requested attribute name cu already exists
        del self.device.cu
    self.device.bind(cu=Config)
    if self.config_lock:
        self._lock()
def close(self):
    """Terminate the session, releasing the config DB lock first if held."""
    must_unlock = self.config_lock
    if must_unlock:
        self._unlock()
    self.device.close()
def _lock(self):
    """Acquire the exclusive configuration DB lock (idempotent)."""
    if self.locked:
        return
    self.device.cu.lock()
    self.locked = True
def _unlock(self):
    """Release the configuration DB lock (idempotent)."""
    if not self.locked:
        return
    self.device.cu.unlock()
    self.locked = False
def _rpc(self, get, child=None, **kwargs):
    """
    Construct an arbitrary RPC call to retrieve common stuff. For example:

    Configuration: get: "<get-configuration/>"
    Interface information: get: "<get-interface-information/>"
    A particular interface's information:
        get: "<get-interface-information/>"
        child: "<interface-name>ge-0/0/0</interface-name>"
    """
    rpc = etree.fromstring(get)  # raises if `get` is not well-formed XML
    if child:
        rpc.append(etree.fromstring(child))
    response = self.device.execute(rpc)
    # NOTE(review): **kwargs is accepted but never used — confirm callers
    # before removing it.
    return etree.tostring(response)
def is_alive(self):
    """Return {'is_alive': bool} describing whether the session is usable."""
    # evaluate the state of the underlying SSH connection
    # and also the NETCONF status from PyEZ
    return {
        'is_alive': self.device._conn._session.transport.is_active() and self.device.connected
    }
@staticmethod
def _is_json_format(config):
    """Return True when *config* parses as JSON, False otherwise."""
    try:
        json.loads(config)
    except (TypeError, ValueError):
        # Not decodable (or not string-like) -> not JSON.
        return False
    else:
        return True
def _detect_config_format(self, config):
    """Guess the configuration payload format.

    :return: one of 'xml', 'set', 'json' or 'text' (the fallback).
    """
    # The first token of a set-style configuration is always one of
    # these configuration-mode verbs.
    set_verbs = (
        'set',
        'activate',
        'deactivate',
        'annotate',
        'copy',
        'delete',
        'insert',
        'protect',
        'rename',
        'unprotect',
    )
    stripped = config.strip()
    if stripped.startswith('<'):
        return 'xml'
    if stripped.split(' ')[0] in set_verbs:
        return 'set'
    if self._is_json_format(config):
        return 'json'
    return 'text'
def _load_candidate(self, filename, config, overwrite):
    """Load *config* (or the contents of *filename*) as the candidate.

    :param overwrite: True for a full replace, False for a merge.
    :raises ReplaceConfigException: on load error during a replace.
    :raises MergeConfigException: on load error during a merge.
    """
    if filename is None:
        configuration = config
    else:
        with open(filename) as f:
            configuration = f.read()
    if not self.config_lock:
        # if not locked during connection time
        # will try to lock it if not already aquired
        self._lock()
        # and the device will be locked till first commit/rollback
    try:
        fmt = self._detect_config_format(configuration)
        if fmt == "xml":
            configuration = etree.XML(configuration)
        self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                            ignore_warning=self.ignore_warning)
    except ConfigLoadError as e:
        # Surface the failure as the exception type matching the
        # requested operation (replace vs merge).
        if self.config_replace:
            raise ReplaceConfigException(e.errs)
        else:
            raise MergeConfigException(e.errs)
def load_replace_candidate(self, filename=None, config=None):
    """Load a candidate configuration that fully replaces the running config on commit."""
    self.config_replace = True
    self._load_candidate(filename, config, True)
def load_merge_candidate(self, filename=None, config=None):
    """Load a candidate configuration to be merged into the running config on commit."""
    self.config_replace = False
    self._load_candidate(filename, config, False)
def compare_config(self):
    """Return the candidate-vs-running diff, or '' when there is none."""
    diff = self.device.cu.diff()
    return '' if diff is None else diff.strip()
def commit_config(self):
    """Commit configuration."""
    self.device.cu.commit(ignore_warning=self.ignore_warning)
    if not self.config_lock:
        # Session-wide locking was not requested; drop the lock that
        # _load_candidate acquired on demand.
        self._unlock()
def discard_config(self):
    """Discard changes (rollback 0)."""
    self.device.cu.rollback(rb_id=0)
    if not self.config_lock:
        # Release the on-demand lock now that the candidate is gone.
        self._unlock()
def rollback(self):
    """Rollback to previous commit (rollback 1, then commit)."""
    self.device.cu.rollback(rb_id=1)
    # Committing makes the rolled-back configuration active.
    self.commit_config()
def get_facts(self):
    """Return general device facts (vendor, model, serial, uptime, ...)."""
    output = self.device.facts

    # device.uptime can be falsy; fall back to NAPALM's -1 "unknown" sentinel.
    uptime = self.device.uptime or -1

    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    interface_list = interfaces.keys()

    return {
        'vendor': u'Juniper',
        'model': py23_compat.text_type(output['model']),
        'serial_number': py23_compat.text_type(output['serialnumber']),
        'os_version': py23_compat.text_type(output['version']),
        'hostname': py23_compat.text_type(output['hostname']),
        'fqdn': py23_compat.text_type(output['fqdn']),
        'uptime': uptime,
        'interface_list': interface_list
    }
def get_interfaces(self):
    """Return interface details keyed by interface name."""
    result = {}
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()

    # convert all the tuples to our pre-defined dict structure
    for iface in interfaces.keys():
        result[iface] = {
            'is_up': interfaces[iface]['is_up'],
            'is_enabled': interfaces[iface]['is_enabled'],
            'description': (interfaces[iface]['description'] or u''),
            'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
            'mac_address': napalm_base.helpers.convert(
                napalm_base.helpers.mac,
                interfaces[iface]['mac_address'],
                py23_compat.text_type(interfaces[iface]['mac_address'])),
            'speed': -1
        }
        # Speed is reported as "<number><unit>" (e.g. "10Gbps"); only a
        # 'gbps' suffix is scaled, everything else is taken as-is (Mbps).
        match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
        if match is None:
            continue
        speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
        if speed_value == -1:
            continue
        speed_unit = match.group(2)
        if speed_unit.lower() == 'gbps':
            speed_value *= 1000
        result[iface]['speed'] = speed_value

    return result
def get_interfaces_counters(self):
    """Return per-interface traffic/error counters (missing values as -1)."""
    table = junos_views.junos_iface_counter_table(self.device)
    table.get()
    return {
        ifname: {field: (-1 if value is None else value) for field, value in fields}
        for ifname, fields in table.items()
    }
def get_environment(self):
    """Return environment details: fans, power, temperature, CPU, memory."""
    environment = junos_views.junos_enviroment_table(self.device)
    routing_engine = junos_views.junos_routing_engine_table(self.device)
    temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
    power_supplies = junos_views.junos_pem_table(self.device)
    environment.get()
    routing_engine.get()
    temperature_thresholds.get()
    environment_data = {}
    current_class = None

    for sensor_object, object_data in environment.items():
        structured_object_data = {k: v for k, v in object_data}

        if structured_object_data['class']:
            # If current object has a 'class' defined, store it for use
            # on subsequent unlabeled lines.
            current_class = structured_object_data['class']
        else:
            # Juniper doesn't label the 2nd+ lines of a given class with a
            # class name. In that case, we use the most recent class seen.
            structured_object_data['class'] = current_class

        if structured_object_data['class'] == 'Power':
            # Create a dict for the 'power' key
            try:
                environment_data['power'][sensor_object] = {}
            except KeyError:
                environment_data['power'] = {}
                environment_data['power'][sensor_object] = {}

            # -1.0 marks "unknown"; corrected later from the PEM table
            # on platforms that support it.
            environment_data['power'][sensor_object]['capacity'] = -1.0
            environment_data['power'][sensor_object]['output'] = -1.0

        if structured_object_data['class'] == 'Fans':
            # Create a dict for the 'fans' key
            try:
                environment_data['fans'][sensor_object] = {}
            except KeyError:
                environment_data['fans'] = {}
                environment_data['fans'][sensor_object] = {}

        status = structured_object_data['status']
        env_class = structured_object_data['class']
        if (status == 'OK' and env_class == 'Power'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['power'][sensor_object]['status'] = True

        elif (status != 'OK' and env_class == 'Power'):
            environment_data['power'][sensor_object]['status'] = False

        elif (status == 'OK' and env_class == 'Fans'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['fans'][sensor_object]['status'] = True

        elif (status != 'OK' and env_class == 'Fans'):
            environment_data['fans'][sensor_object]['status'] = False

        for temperature_object, temperature_data in temperature_thresholds.items():
            structured_temperature_data = {k: v for k, v in temperature_data}
            if structured_object_data['class'] == 'Temp':
                # Create a dict for the 'temperature' key
                try:
                    environment_data['temperature'][sensor_object] = {}
                except KeyError:
                    environment_data['temperature'] = {}
                    environment_data['temperature'][sensor_object] = {}
                # Check we have a temperature field in this class (See #66)
                if structured_object_data['temperature']:
                    environment_data['temperature'][sensor_object]['temperature'] = \
                        float(structured_object_data['temperature'])
                # Set a default value (False) to the key is_critical and is_alert
                environment_data['temperature'][sensor_object]['is_alert'] = False
                environment_data['temperature'][sensor_object]['is_critical'] = False
                # Check if the working temperature is equal to or higher than alerting threshold
                temp = structured_object_data['temperature']
                if structured_temperature_data['red-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_critical'] = True
                    environment_data['temperature'][sensor_object]['is_alert'] = True
                elif structured_temperature_data['yellow-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_alert'] = True

    # Try to correct Power Supply information
    pem_table = dict()
    try:
        power_supplies.get()
    except RpcError:
        # Not all platforms have support for this
        pass
    else:
        # Format PEM information and correct capacity and output values
        for pem in power_supplies.items():
            pem_name = pem[0].replace("PEM", "Power Supply")
            pem_table[pem_name] = dict(pem[1])
            environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
            environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']

    for routing_engine_object, routing_engine_data in routing_engine.items():
        structured_routing_engine_data = {k: v for k, v in routing_engine_data}
        # Create dicts for 'cpu' and 'memory'.
        try:
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        except KeyError:
            environment_data['cpu'] = {}
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        # Calculate the CPU usage by using the CPU idle value.
        environment_data['cpu'][routing_engine_object]['%usage'] = \
            100.0 - structured_routing_engine_data['cpu-idle']
        try:
            environment_data['memory']['available_ram'] = \
                int(structured_routing_engine_data['memory-dram-size'])
        except ValueError:
            # Some platforms report e.g. "2048 MB"; keep the digits only.
            environment_data['memory']['available_ram'] = \
                int(
                    ''.join(
                        i for i in structured_routing_engine_data['memory-dram-size']
                        if i.isdigit()
                    )
                )
        # Junos gives us RAM in %, so calculation has to be made.
        # Sadly, because of this, results are not 100% accurate to the truth.
        environment_data['memory']['used_ram'] = \
            int(round(environment_data['memory']['available_ram'] / 100.0 *
                      structured_routing_engine_data['memory-buffer-utilization']))
    return environment_data
@staticmethod
def _get_address_family(table):
    """
    Derive the address family from a Junos routing-table name.

    :params table: The name of the routing table (e.g. 'inet.0')
    :returns: address family ('ipv4', 'ipv6', 'flow', or the raw token)
    """
    mapping = {
        'inet': 'ipv4',
        'inet6': 'ipv6',
        'inetflow': 'flow'
    }
    token = table.split('.')[-2]
    # Unknown families fall through unchanged.
    return mapping.get(token, token)
def _parse_route_stats(self, neighbor):
    """Build the per-address-family prefix counters for a BGP neighbor.

    ipv4/ipv6 default to -1; real values are filled in only when the
    session is up.
    """
    data = {
        'ipv4': {
            'received_prefixes': -1,
            'accepted_prefixes': -1,
            'sent_prefixes': -1
        },
        'ipv6': {
            'received_prefixes': -1,
            'accepted_prefixes': -1,
            'sent_prefixes': -1
        }
    }
    if not neighbor['is_up']:
        return data
    elif isinstance(neighbor['tables'], list):
        if isinstance(neighbor['sent_prefixes'], int):
            # We expect sent_prefixes to be a list, but sometimes it
            # is of type int. Therefore convert attribute to list
            neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
        for idx, table in enumerate(neighbor['tables']):
            family = self._get_address_family(table)
            data[family] = {}
            data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
            data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
            if 'in sync' in neighbor['send-state'][idx]:
                # Sent counters are only reported for tables in sync,
                # hence the pop(0) consuming the list in order.
                data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
            else:
                data[family]['sent_prefixes'] = 0
    else:
        family = self._get_address_family(neighbor['tables'])
        data[family] = {}
        data[family]['received_prefixes'] = neighbor['received_prefixes']
        data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
        data[family]['sent_prefixes'] = neighbor['sent_prefixes']
    return data
@staticmethod
def _parse_value(value):
    """Normalise a raw table value: strings -> text, None -> '', rest unchanged."""
    if value is None:
        return u''
    if isinstance(value, py23_compat.string_types):
        return py23_compat.text_type(value)
    return value
def get_bgp_neighbors(self):
    """Return BGP neighbors details, keyed by routing instance then peer IP."""
    bgp_neighbor_data = {}
    default_neighbor_details = {
        'local_as': 0,
        'remote_as': 0,
        'remote_id': '',
        'is_up': False,
        'is_enabled': False,
        'description': '',
        'uptime': 0,
        'address_family': {}
    }
    keys = default_neighbor_details.keys()

    uptime_table = junos_views.junos_bgp_uptime_table(self.device)
    bgp_neighbors_table = junos_views.junos_bgp_table(self.device)

    # Cache of per-instance uptime lookups to avoid repeated RPCs.
    uptime_table_lookup = {}

    def _get_uptime_table(instance):
        if instance not in uptime_table_lookup:
            uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
        return uptime_table_lookup[instance]

    def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
        '''
        Make sure to execute a simple request whenever using
        junos > 13. This is a helper used to avoid code redundancy
        and reuse the function also when iterating through the list
        BGP neighbors under a specific routing instance,
        also when the device is capable to return the routing
        instance name at the BGP neighbor level.
        '''
        for bgp_neighbor in neighbor_data:
            peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # not instance, means newer Junos version,
                # as we request everything in a single request
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                # instance is explicitly requests,
                # thus it's an old Junos, so we retrieve the BGP neighbors
                # under a certain routing instance
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            if instance_name not in bgp_neighbor_data:
                bgp_neighbor_data[instance_name] = {}
            if 'router_id' not in bgp_neighbor_data[instance_name]:
                # we only need to set this once
                bgp_neighbor_data[instance_name]['router_id'] = \
                    py23_compat.text_type(neighbor_details.get('local_id', ''))
            peer = {
                key: self._parse_value(value)
                for key, value in neighbor_details.items()
                if key in keys
            }
            peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
            peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
            peer['address_family'] = self._parse_route_stats(neighbor_details)
            if 'peers' not in bgp_neighbor_data[instance_name]:
                bgp_neighbor_data[instance_name]['peers'] = {}
            bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
            if not uptime_table_items:
                uptime_table_items = _get_uptime_table(instance)
            for neighbor, uptime in uptime_table_items:
                if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                    bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]

    # Commenting out the following sections, till Junos
    # will provide a way to identify the routing instance name
    # from the details of the BGP neighbor
    # currently, there are Junos 15 version having a field called `peer_fwd_rti`
    # but unfortunately, this is not consistent.
    # Junos 17 might have this fixed, but this needs to be revisited later.
    # In the definition below, `old_junos` means a version that does not provide
    # the forwarding RTI information.
    #
    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15

    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device).get()
    for instance, instance_data in instances.items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        bgp_neighbor_data[instance] = {'peers': {}}
        instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
        uptime_table_items = uptime_table.get(instance=instance).items()
        _get_bgp_neighbors_core(instance_neighbors,
                                instance=instance,
                                uptime_table_items=uptime_table_items)
    # If the OS provides the `peer_fwd_rti` or any way to identify the
    # rotuing instance name (see above), the performances of this getter
    # can be significantly improved, as we won't execute one request
    # for each an every RT.
    # However, this improvement would only be beneficial for multi-VRF envs.
    #
    # else:
    #     instance_neighbors = bgp_neighbors_table.get().items()
    #     _get_bgp_neighbors_core(instance_neighbors)

    # Drop instances that ended up with no peers.
    bgp_tmp_dict = {}
    for k, v in bgp_neighbor_data.items():
        if bgp_neighbor_data[k]['peers']:
            bgp_tmp_dict[k] = v
    return bgp_tmp_dict
def get_lldp_neighbors_detail(self, interface=''):
    """Detailed view of the LLDP neighbors.

    :param interface: optionally restrict the output to this local interface.
    :return: dict keyed by local interface, each value a list of neighbor dicts.
    """
    lldp_neighbors = {}
    lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
    try:
        lldp_table.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    # Reuse the table already fetched above (previously a second,
    # redundant RPC was issued just to read the keys).
    interfaces = lldp_table.keys()
    # Honour the `interface` filter: previously the parameter was silently
    # ignored because the loop variable below shadowed it.
    if interface:
        interfaces = [iface for iface in interfaces if iface == interface]
    # get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
    # and SRX Series is get-lldp-interface-neighbors-information,
    # and rpc for M, MX, and T Series is get-lldp-interface-neighbors
    # ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
    # ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
    old_style = self.device.facts.get('personality') in ('MX', 'M', 'T')
    lldp_table.GET_RPC = ('get-lldp-interface-neighbors'
                          if old_style else 'get-lldp-interface-neighbors-information')
    for iface in interfaces:
        if old_style:
            lldp_table.get(interface_device=iface)
        else:
            lldp_table.get(interface_name=iface)
        for item in lldp_table:
            lldp_neighbors.setdefault(iface, []).append({
                'parent_interface': item.parent_interface,
                'remote_port': item.remote_port,
                'remote_chassis_id': napalm_base.helpers.convert(
                    napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
                'remote_port_description': napalm_base.helpers.convert(
                    py23_compat.text_type, item.remote_port_description),
                'remote_system_name': item.remote_system_name,
                'remote_system_description': item.remote_system_description,
                'remote_system_capab': item.remote_system_capab,
                'remote_system_enable_capab': item.remote_system_enable_capab
            })
    return lldp_neighbors
def cli(self, commands):
    """Execute raw CLI commands and returns their output.

    Pipe modifiers (match/except/last/trim/count/find) are emulated
    locally for platforms that do not execute them on-box; 'save' is
    stripped for safety.
    """
    cli_output = {}

    def _count(txt, none):  # Second arg for consistency only. noqa
        '''
        Return the exact output, as Junos displays
        e.g.:
        > show system processes extensive | match root | count
        Count: 113 lines
        '''
        count = len(txt.splitlines())
        return 'Count: {count} lines'.format(count=count)

    def _trim(txt, length):
        '''
        Trim specified number of columns from start of line.
        '''
        try:
            newlines = []
            for line in txt.splitlines():
                newlines.append(line[int(length):])
            return '\n'.join(newlines)
        except ValueError:
            return txt

    def _except(txt, pattern):
        '''
        Show only text that does not match a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        unmatched = [
            line for line in txt.splitlines()
            if not re.search(rgx, line, re.I)
        ]
        return '\n'.join(unmatched)

    def _last(txt, length):
        '''
        Display end of output only.
        '''
        try:
            return '\n'.join(
                txt.splitlines()[(-1)*int(length):]
            )
        except ValueError:
            return txt

    def _match(txt, pattern):
        '''
        Show only text that matches a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        matched = [
            line for line in txt.splitlines()
            if re.search(rgx, line, re.I)
        ]
        return '\n'.join(matched)

    def _find(txt, pattern):
        '''
        Search for first occurrence of pattern.
        '''
        rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
        match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
        if match:
            return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
        else:
            return '\nPattern not found'

    def _process_pipe(cmd, txt):
        '''
        Process CLI output from Juniper device that
        doesn't allow piping the output.
        '''
        # NOTE(review): device output is returned untouched when present;
        # local pipe emulation only runs when the device gave None —
        # confirm this is the intended contract.
        if txt is not None:
            return txt
        _OF_MAP = OrderedDict()
        _OF_MAP['except'] = _except
        _OF_MAP['match'] = _match
        _OF_MAP['last'] = _last
        _OF_MAP['trim'] = _trim
        _OF_MAP['count'] = _count
        _OF_MAP['find'] = _find
        # the operations order matter in this case!
        exploded_cmd = cmd.split('|')
        pipe_oper_args = {}
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            pipe_args = ''.join(exploded_pipe[1:2])
            # will not throw error when there's no arg
            pipe_oper_args[pipe_oper] = pipe_args
        for oper in _OF_MAP.keys():
            # to make sure the operation sequence is correct
            if oper not in pipe_oper_args.keys():
                continue
            txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
        return txt

    if not isinstance(commands, list):
        raise TypeError('Please enter a valid list of commands!')
    _PIPE_BLACKLIST = ['save']
    # Preprocessing to avoid forbidden commands
    for command in commands:
        exploded_cmd = command.split('|')
        command_safe_parts = []
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            if pipe_oper in _PIPE_BLACKLIST:
                continue
            pipe_args = ''.join(exploded_pipe[1:2])
            safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                              args=pipe_args)
            command_safe_parts.append(safe_pipe)
        safe_command = exploded_cmd[0] if not command_safe_parts else\
            '{base} | {pipes}'.format(base=exploded_cmd[0],
                                      pipes=' | '.join(command_safe_parts))
        raw_txt = self.device.cli(safe_command, warning=False)
        cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
            _process_pipe(command, raw_txt))
    return cli_output
def get_bgp_config(self, group='', neighbor=''):
    """Return BGP configuration, keyed by group then neighbor.

    :param group: optionally restrict the output to this BGP group.
    :param neighbor: optionally restrict to this neighbor (only honoured
        when a group is also given).
    """
    def update_dict(d, u):  # for deep dictionary update
        # NOTE(review): collections.Mapping is removed in Python 3.10+;
        # migrate to collections.abc.Mapping when dropping old Pythons.
        for k, v in u.items():
            if isinstance(d, collections.Mapping):
                if isinstance(v, collections.Mapping):
                    r = update_dict(d.get(k, {}), v)
                    d[k] = r
                else:
                    d[k] = u[k]
            else:
                d = {k: u[k]}
        return d

    def build_prefix_limit(**args):
        """
        Transform the lements of a dictionary into nested dictionaries.

        Example:
            {
                'inet_unicast_limit': 500,
                'inet_unicast_teardown_threshold': 95,
                'inet_unicast_teardown_timeout': 5
            }
        becomes:
            {
                'inet': {
                    'unicast': {
                        'limit': 500,
                        'teardown': {
                            'threshold': 95,
                            'timeout': 5
                        }
                    }
                }
            }
        """
        prefix_limit = {}
        for key, value in args.items():
            key_levels = key.split('_')
            length = len(key_levels)-1
            temp_dict = {
                key_levels[length]: value
            }
            for index in reversed(range(length)):
                level = key_levels[index]
                temp_dict = {level: temp_dict}
            update_dict(prefix_limit, temp_dict)
        return prefix_limit

    # Field -> datatype maps; the datatype also selects the default value.
    _COMMON_FIELDS_DATATYPE_ = {
        'description': py23_compat.text_type,
        'local_address': py23_compat.text_type,
        'local_as': int,
        'remote_as': int,
        'import_policy': py23_compat.text_type,
        'export_policy': py23_compat.text_type,
        'inet_unicast_limit_prefix_limit': int,
        'inet_unicast_teardown_threshold_prefix_limit': int,
        'inet_unicast_teardown_timeout_prefix_limit': int,
        'inet_unicast_novalidate_prefix_limit': int,
        'inet_flow_limit_prefix_limit': int,
        'inet_flow_teardown_threshold_prefix_limit': int,
        'inet_flow_teardown_timeout_prefix_limit': int,
        'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
        'inet6_unicast_limit_prefix_limit': int,
        'inet6_unicast_teardown_threshold_prefix_limit': int,
        'inet6_unicast_teardown_timeout_prefix_limit': int,
        'inet6_unicast_novalidate_prefix_limit': int,
        'inet6_flow_limit_prefix_limit': int,
        'inet6_flow_teardown_threshold_prefix_limit': int,
        'inet6_flow_teardown_timeout_prefix_limit': int,
        'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
    }

    _PEER_FIELDS_DATATYPE_MAP_ = {
        'authentication_key': py23_compat.text_type,
        'route_reflector_client': bool,
        'nhs': bool
    }
    _PEER_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )

    _GROUP_FIELDS_DATATYPE_MAP_ = {
        'type': py23_compat.text_type,
        'apply_groups': list,
        'remove_private_as': bool,
        'multipath': bool,
        'multihop_ttl': int
    }
    _GROUP_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )

    _DATATYPE_DEFAULT_ = {
        py23_compat.text_type: '',
        int: 0,
        bool: False,
        list: []
    }

    bgp_config = {}

    if group:
        bgp = junos_views.junos_bgp_config_group_table(self.device)
        bgp.get(group=group)
    else:
        bgp = junos_views.junos_bgp_config_table(self.device)
        bgp.get()
        neighbor = ''  # if no group is set, no neighbor should be set either
    bgp_items = bgp.items()

    if neighbor:
        neighbor_ip = napalm_base.helpers.ip(neighbor)

    for bgp_group in bgp_items:
        bgp_group_name = bgp_group[0]
        bgp_group_details = bgp_group[1]
        # Seed with defaults so every documented field is present.
        bgp_config[bgp_group_name] = {
            field: _DATATYPE_DEFAULT_.get(datatype)
            for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
            if '_prefix_limit' not in field
        }
        for elem in bgp_group_details:
            if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                continue
            datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
            default = _DATATYPE_DEFAULT_.get(datatype)
            key = elem[0]
            value = elem[1]
            if key in ['export_policy', 'import_policy']:
                if isinstance(value, list):
                    value = ' '.join(value)
            if key == 'local_address':
                value = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, value, value)
            if key == 'neighbors':
                # Stashed for the per-peer loop below.
                bgp_group_peers = value
                continue
            bgp_config[bgp_group_name].update({
                key: napalm_base.helpers.convert(datatype, value, default)
            })
        prefix_limit_fields = {}
        for elem in bgp_group_details:
            if '_prefix_limit' in elem[0] and elem[1] is not None:
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                prefix_limit_fields.update({
                    elem[0].replace('_prefix_limit', ''):
                        napalm_base.helpers.convert(datatype, elem[1], default)
                })
        bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
        if 'multihop' in bgp_config[bgp_group_name].keys():
            # Delete 'multihop' key from the output
            del bgp_config[bgp_group_name]['multihop']
            if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                # Set ttl to default value 64
                bgp_config[bgp_group_name]['multihop_ttl'] = 64

        bgp_config[bgp_group_name]['neighbors'] = {}
        for bgp_group_neighbor in bgp_group_peers.items():
            bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
            # NOTE(review): compares against the raw `neighbor` string here
            # but against `neighbor_ip` for the break below — confirm both
            # are normalised the same way.
            if neighbor and bgp_peer_address != neighbor:
                continue  # if filters applied, jump over all other neighbors
            bgp_group_details = bgp_group_neighbor[1]
            bgp_peer_details = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                bgp_peer_details.update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
                bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['local_as'])
                bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['remote_as'])
                # NOTE(review): this re-checks the loop's `key` — appears to
                # rely on 'cluster' being seen as a field; verify placement.
                if key == 'cluster':
                    bgp_peer_details['route_reflector_client'] = True
                    # we do not want cluster in the output
                    del bgp_peer_details['cluster']
            if 'cluster' in bgp_config[bgp_group_name].keys():
                bgp_peer_details['route_reflector_client'] = True
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
            if neighbor and bgp_peer_address == neighbor_ip:
                break  # found the desired neighbor
        if 'cluster' in bgp_config[bgp_group_name].keys():
            # we do not want cluster in the output
            del bgp_config[bgp_group_name]['cluster']
    return bgp_config
def get_bgp_neighbors_detail(self, neighbor_address=''):
    """Detailed view of the BGP neighbors operational data.

    Returns {instance_name: {remote_as: [neighbor_details, ...]}}.
    If ``neighbor_address`` is given, only that peer is requested from the
    device (the filter is applied in the RPC call below).
    """
    bgp_neighbors = {}
    # Template: every neighbor starts from these defaults so the returned
    # structure always exposes the full set of keys, even when the RPC
    # reply omits some of them.
    default_neighbor_details = {
        'up': False,
        'local_as': 0,
        'remote_as': 0,
        'router_id': u'',
        'local_address': u'',
        'routing_table': u'',
        'local_address_configured': False,
        'local_port': 0,
        'remote_address': u'',
        'remote_port': 0,
        'multihop': False,
        'multipath': False,
        'remove_private_as': False,
        'import_policy': u'',
        'export_policy': u'',
        'input_messages': -1,
        'output_messages': -1,
        'input_updates': -1,
        'output_updates': -1,
        'messages_queued_out': -1,
        'connection_state': u'',
        'previous_connection_state': u'',
        'last_event': u'',
        'suppress_4byte_as': False,
        'local_as_prepend': False,
        'holdtime': 0,
        'configured_holdtime': 0,
        'keepalive': 0,
        'configured_keepalive': 0,
        'active_prefix_count': -1,
        'received_prefix_count': -1,
        'accepted_prefix_count': -1,
        'suppressed_prefix_count': -1,
        'advertised_prefix_count': -1,
        'flap_count': 0
    }
    # Maps tokens of the space-separated 'options' field from the RPC reply
    # to the boolean keys of the returned neighbor dict.
    OPTION_KEY_MAP = {
        'RemovePrivateAS': 'remove_private_as',
        'Multipath': 'multipath',
        'Multihop': 'multihop',
        'AddressFamily': 'local_address_configured'
        # 'AuthKey' : 'authentication_key_set'
        # but other vendors do not specify if auth key is set
        # other options:
        # Preference, HoldTime, Ttl, LogUpDown, Refresh
    }

    def _bgp_iter_core(neighbor_data, instance=None):
        '''
        Iterate over a list of neighbors.
        For older junos, the routing instance is not specified inside the
        BGP neighbors XML, therefore we need to use a super sub-optimal structure
        as in get_bgp_neighbors: iterate through the list of network instances
        then execute one request for each and every routing instance.
        For newer junos, this is not necessary as the routing instance is available
        and we can get everything solve in a single request.
        '''
        for bgp_neighbor in neighbor_data:
            remote_as = int(bgp_neighbor[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # no instance passed in: derive it from the reply itself
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            options = neighbor_details.pop('options', '')
            if isinstance(options, str):
                options_list = options.split()
                for option in options_list:
                    key = OPTION_KEY_MAP.get(option)
                    if key is not None:
                        neighbor_details[key] = True
            four_byte_as = neighbor_details.pop('4byte_as', 0)
            # addresses come back as 'ip+port'; split them apart,
            # defaulting the port to 179 when absent
            local_address = neighbor_details.pop('local_address', '')
            local_details = local_address.split('+')
            neighbor_details['local_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, local_details[0], local_details[0])
            if len(local_details) == 2:
                neighbor_details['local_port'] = int(local_details[1])
            else:
                neighbor_details['local_port'] = 179
            neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
            peer_address = neighbor_details.pop('peer_address', '')
            remote_details = peer_address.split('+')
            neighbor_details['remote_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, remote_details[0], remote_details[0])
            if len(remote_details) == 2:
                neighbor_details['remote_port'] = int(remote_details[1])
            else:
                neighbor_details['remote_port'] = 179
            neighbor_details['routing_table'] = instance_name
            neighbor_details['local_as'] = napalm_base.helpers.as_number(
                neighbor_details['local_as'])
            neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                neighbor_details['remote_as'])
            neighbors_rib = neighbor_details.pop('rib')
            neighbors_queue = neighbor_details.pop('queue')
            # total output queue depth summed over all queues of this peer
            messages_queued_out = 0
            for queue_entry in neighbors_queue.items():
                messages_queued_out += queue_entry[1][0][1]
            neighbor_details['messages_queued_out'] = messages_queued_out
            if instance_name not in bgp_neighbors.keys():
                bgp_neighbors[instance_name] = {}
            if remote_as not in bgp_neighbors[instance_name].keys():
                bgp_neighbors[instance_name][remote_as] = []
            neighbor_rib_stats = neighbors_rib.items()
            if not neighbor_rib_stats:
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                continue  # no RIBs available, pass default details
            neighbor_rib_details = {
                'active_prefix_count': 0,
                'received_prefix_count': 0,
                'accepted_prefix_count': 0,
                'suppressed_prefix_count': 0,
                'advertised_prefix_count': 0
            }
            # aggregate the prefix counters across all RIBs of this neighbor;
            # missing (None) values count as zero
            for rib_entry in neighbor_rib_stats:
                for elem in rib_entry[1]:
                    if elem[1] is None:
                        neighbor_rib_details[elem[0]] += 0
                    else:
                        neighbor_rib_details[elem[0]] += elem[1]
            neighbor_details.update(neighbor_rib_details)
            bgp_neighbors[instance_name][remote_as].append(neighbor_details)

    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    # if old_junos:
    # one RPC per routing instance (see _bgp_iter_core docstring)
    instances = junos_views.junos_route_instance_table(self.device)
    for instance, instance_data in instances.get().items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                neighbor_address=str(neighbor_address)).items()
        _bgp_iter_core(neighbor_data, instance=instance)
    # else:
    #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
    #     _bgp_iter_core(neighbor_data)
    return bgp_neighbors
def get_arp_table(self):
    """Return the ARP table as a list of entry dicts (mac/ip normalised)."""
    # A predefined PyEZ ArpTable exists (jnpr.junos.op.phyport), but the
    # project view is used instead because it also exposes the hostname,
    # TTE and VLAN fields needed here.
    table_view = junos_views.junos_arp_table(self.device)
    table_view.get()
    entries = []
    for _, fields in table_view.items():
        entry = dict(fields)
        # normalise MAC and IP representations via the napalm helpers
        entry['mac'] = napalm_base.helpers.mac(entry.get('mac'))
        entry['ip'] = napalm_base.helpers.ip(entry.get('ip'))
        entries.append(entry)
    return entries
def get_ntp_peers(self):
    """Return the NTP peers configured on the device, keyed by peer IP."""
    config_view = junos_views.junos_ntp_peers_config_table(self.device)
    config_view.get()
    peers = config_view.items()
    if not peers:
        return {}
    # values are empty dicts by the NAPALM base contract
    return {napalm_base.helpers.ip(entry[0]): {} for entry in peers}
def get_ntp_servers(self):
    """Return the NTP servers configured on the device, keyed by server IP."""
    config_view = junos_views.junos_ntp_servers_config_table(self.device)
    config_view.get()
    servers = config_view.items()
    if not servers:
        return {}
    # values are empty dicts by the NAPALM base contract
    return {napalm_base.helpers.ip(entry[0]): {} for entry in servers}
def get_ntp_stats(self):
    """Return NTP stats (associations).

    The NTP associations have no structured XML RPC on JunOS, so the raw
    CLI output of 'show ntp associations no-resolve' is parsed instead.
    """
    ntp_stats = []
    # Raw strings: the previous version used plain string literals, whose
    # '\s' / '\.' sequences are invalid escapes (DeprecationWarning, and a
    # SyntaxWarning on recent CPython).
    REGEX = (
        r'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
        r'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
        r'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
        r'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
        r'\s+([0-9\.]+)\s?$'
    )
    ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
    ntp_assoc_output_lines = ntp_assoc_output.splitlines()
    for ntp_assoc_output_line in ntp_assoc_output_lines[3:]:  # skip header lines
        line_search = re.search(REGEX, ntp_assoc_output_line, re.I)
        if not line_search:
            continue  # pattern not found
        line_groups = line_search.groups()
        try:
            ntp_stats.append({
                'remote': napalm_base.helpers.ip(line_groups[1]),
                'synchronized': (line_groups[0] == '*'),
                'referenceid': py23_compat.text_type(line_groups[2]),
                'stratum': int(line_groups[3]),
                'type': py23_compat.text_type(line_groups[4]),
                'when': py23_compat.text_type(line_groups[5]),
                'hostpoll': int(line_groups[6]),
                'reachability': int(line_groups[7]),
                'delay': float(line_groups[8]),
                'offset': float(line_groups[9]),
                'jitter': float(line_groups[10])
            })
        except Exception:
            continue  # a field failed to parse; jump to next line
    return ntp_stats
def get_interfaces_ip(self):
    """Return the configured IP addresses.

    Structure: {interface: {'ipv4'|'ipv6': {address: {'prefix_length': int}}}}.
    """
    interfaces_ip = {}
    interface_table = junos_views.junos_ip_interfaces_table(self.device)
    interface_table.get()
    interface_table_items = interface_table.items()
    # JunOS address family -> NAPALM family key
    _FAMILY_VMAP_ = {
        'inet': u'ipv4',
        'inet6': u'ipv6'
        # can add more mappings
    }
    # fallback prefix length per family when the reply has none
    _FAMILY_MAX_PREFIXLEN = {
        'inet': 32,
        'inet6': 128
    }
    for interface_details in interface_table_items:
        # table key is the 'addr/prefix' string
        ip_network = interface_details[0]
        ip_address = ip_network.split('/')[0]
        address = napalm_base.helpers.convert(
            napalm_base.helpers.ip, ip_address, ip_address)
        try:
            interface_details_dict = dict(interface_details[1])
            family_raw = interface_details_dict.get('family')
            interface = py23_compat.text_type(interface_details_dict.get('interface'))
        except ValueError:
            continue
        prefix = napalm_base.helpers.convert(int,
                                             ip_network.split('/')[-1],
                                             _FAMILY_MAX_PREFIXLEN.get(family_raw))
        family = _FAMILY_VMAP_.get(family_raw)
        if not family or not interface:
            # skip families not mapped above, and nameless entries
            continue
        if interface not in interfaces_ip.keys():
            interfaces_ip[interface] = {}
        if family not in interfaces_ip[interface].keys():
            interfaces_ip[interface][family] = {}
        if address not in interfaces_ip[interface][family].keys():
            interfaces_ip[interface][family][address] = {}
        interfaces_ip[interface][family][address][u'prefix_length'] = prefix
    return interfaces_ip
def get_mac_address_table(self):
    """Return the MAC address table as a list of entry dicts."""
    # Pick the view matching the platform: EX & QFX switches expose the
    # table through a different RPC than routers, and L2NG-style switches
    # differ again.
    facts = self.device.facts
    if facts.get('personality', '') in ['SWITCH']:  # EX & QFX devices
        if facts.get('switch_style', '') in ['VLAN_L2NG']:  # L2NG devices
            mac_view = junos_views.junos_mac_address_table_switch_l2ng(self.device)
        else:
            mac_view = junos_views.junos_mac_address_table_switch(self.device)
    else:
        mac_view = junos_views.junos_mac_address_table(self.device)
    mac_view.get()
    defaults = {
        'mac': u'',
        'interface': u'',
        'vlan': 0,
        'static': False,
        'active': True,
        'moves': 0,
        'last_move': 0.0,
    }
    table = []
    for _, fields in mac_view.items():
        entry = defaults.copy()
        entry.update(dict(fields))
        # JUNOS returns '*' for Type = Flood; skip those pseudo-entries
        if entry.get('mac') == '*':
            continue
        entry['mac'] = napalm_base.helpers.mac(entry.get('mac'))
        table.append(entry)
    return table
def get_route_to(self, destination='', protocol=''):
    """Return route details to a specific destination, learned from a certain protocol.

    Returns {'<prefix>/<len>': [route_dict, ...]}; protocol-specific
    attributes are nested under each route's 'protocol_attributes' key.
    Raises TypeError for a non-string destination, CommandTimeoutException
    when the query is too broad, CommandErrorException otherwise.
    """
    routes = {}
    if not isinstance(destination, py23_compat.string_types):
        raise TypeError('Please specify a valid destination!')
    if protocol and isinstance(destination, py23_compat.string_types):
        protocol = protocol.lower()
    if protocol == 'connected':
        protocol = 'direct'  # this is how is called on JunOS
    _COMMON_PROTOCOL_FIELDS_ = [
        'destination',
        'prefix_length',
        'protocol',
        'current_active',
        'last_active',
        'age',
        'next_hop',
        'outgoing_interface',
        'selected_next_hop',
        'preference',
        'inactive_reason',
        'routing_table'
    ]  # identifies the list of fileds common for all protocols
    _BOOLEAN_FIELDS_ = [
        'current_active',
        'selected_next_hop',
        'last_active'
    ]  # fields expected to have boolean values
    # per-protocol extra fields exposed via 'protocol_attributes'
    _PROTOCOL_SPECIFIC_FIELDS_ = {
        'bgp': [
            'local_as',
            'remote_as',
            'as_path',
            'communities',
            'local_preference',
            'preference2',
            'remote_address',
            'metric',
            'metric2'
        ],
        'isis': [
            'level',
            'metric',
            'local_as'
        ]
    }
    routes_table = junos_views.junos_protocol_route_table(self.device)
    rt_kargs = {
        'destination': destination
    }
    if protocol and isinstance(destination, py23_compat.string_types):
        rt_kargs['protocol'] = protocol
    try:
        routes_table.get(**rt_kargs)
    except RpcTimeoutError:
        # on devices with milions of routes
        # in case the destination is too generic (e.g.: 10/8)
        # will take very very long to determine all routes and
        # moreover will return a huge list
        raise CommandTimeoutException(
            'Too many routes returned! Please try with a longer prefix or a specific protocol!'
        )
    except RpcError as rpce:
        if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
            raise CommandErrorException(
                'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
        raise CommandErrorException(rpce)
    except Exception as err:
        raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))
    routes_items = routes_table.items()
    for route in routes_items:
        d = {}
        # next_hop = route[0]
        d = {elem[0]: elem[1] for elem in route[1]}
        # rebuild 'destination' as 'prefix/length'
        destination = napalm_base.helpers.ip(d.pop('destination', ''))
        prefix_length = d.pop('prefix_length', 32)
        destination = '{d}/{p}'.format(
            d=destination,
            p=prefix_length
        )
        # missing boolean fields default to False
        d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
        as_path = d.get('as_path')
        if as_path is not None:
            d['as_path'] = as_path.split(' I ')[0]\
                .replace('AS path:', '')\
                .replace('I', '')\
                .strip()
            # to be sure that contains only AS Numbers
        if d.get('inactive_reason') is None:
            d['inactive_reason'] = u''
        route_protocol = d.get('protocol').lower()
        if protocol and protocol != route_protocol:
            continue
        communities = d.get('communities')
        if communities is not None and type(communities) is not list:
            d['communities'] = [communities]
        d_keys = list(d.keys())
        # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
        all_protocol_attributes = {
            key: d.pop(key)
            for key in d_keys
            if key not in _COMMON_PROTOCOL_FIELDS_
        }
        protocol_attributes = {
            key: value for key, value in all_protocol_attributes.items()
            if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
        }
        d['protocol_attributes'] = protocol_attributes
        if destination not in routes.keys():
            routes[destination] = []
        routes[destination].append(d)
    return routes
def get_snmp_information(self):
    """Return the SNMP configuration.

    Top-level keys come straight from the config view; community
    definitions are nested under the 'community' key.
    """
    snmp_information = {}
    snmp_config = junos_views.junos_snmp_config_table(self.device)
    snmp_config.get()
    snmp_items = snmp_config.items()
    if not snmp_items:
        return snmp_information
    # top-level SNMP settings (contact, location, chassis id, ...)
    snmp_information = {
        py23_compat.text_type(ele[0]): ele[1] if ele[1] else ''
        for ele in snmp_items[0][1]
    }
    snmp_information['community'] = {}
    communities_table = snmp_information.pop('communities_table')
    if not communities_table:
        return snmp_information
    for community in communities_table.items():
        community_name = py23_compat.text_type(community[0])
        community_details = {
            'acl': ''
        }
        # the 'mode' field is translated to the NAPALM authorization wording
        community_details.update({
            py23_compat.text_type(ele[0]): py23_compat.text_type(
                ele[1] if ele[0] != 'mode'
                else C.SNMP_AUTHORIZATION_MODE_MAP.get(ele[1]))
            for ele in community[1]
        })
        snmp_information['community'][community_name] = community_details
    return snmp_information
def get_probes_config(self):
    """Return the configuration of the RPM probes, grouped by probe name."""
    config_view = junos_views.junos_rpm_probes_config_table(self.device)
    config_view.get()
    probes = {}
    for raw_test_name, raw_fields in config_view.items():
        test_name = py23_compat.text_type(raw_test_name)
        fields = dict(raw_fields)
        probe_name = napalm_base.helpers.convert(
            py23_compat.text_type, fields.pop('probe_name'))
        # group tests under their owning probe
        probes.setdefault(probe_name, {})[test_name] = {
            'probe_type': napalm_base.helpers.convert(
                py23_compat.text_type, fields.pop('probe_type', '')),
            'target': napalm_base.helpers.convert(
                py23_compat.text_type, fields.pop('target', '')),
            'source': napalm_base.helpers.convert(
                py23_compat.text_type, fields.pop('source_address', '')),
            'probe_count': napalm_base.helpers.convert(
                int, fields.pop('probe_count', '0')),
            'test_interval': napalm_base.helpers.convert(
                int, fields.pop('test_interval', '0')),
        }
    return probes
def get_probes_results(self):
    """Return the results of the RPM probes, grouped by probe name."""
    results = {}
    results_view = junos_views.junos_rpm_probes_results_table(self.device)
    results_view.get()
    for raw_probe_name, raw_fields in results_view.items():
        probe_name = py23_compat.text_type(raw_probe_name)
        test_results = dict(raw_fields)
        test_results['last_test_loss'] = napalm_base.helpers.convert(
            int, test_results.pop('last_test_loss'), 0)
        # the device reports timings in microseconds; convert to ms
        for param_name, param_value in test_results.items():
            if isinstance(param_value, float):
                test_results[param_name] = param_value * 1e-3
        test_name = test_results.pop('test_name', '')
        if test_results.get('source', u'') is None:
            test_results['source'] = u''
        results.setdefault(probe_name, {})[test_name] = test_results
    return results
def traceroute(self,
               destination,
               source=C.TRACEROUTE_SOURCE,
               ttl=C.TRACEROUTE_TTL,
               timeout=C.TRACEROUTE_TIMEOUT,
               vrf=C.TRACEROUTE_VRF):
    """Execute traceroute and return results.

    Returns {'success': {ttl: {'probes': {...}}}} on success or
    {'error': message} when the device reports a failure.
    """
    traceroute_result = {}
    # calling form RPC does not work properly :(
    # but defined junos_route_instance_table just in case
    source_str = ''
    maxttl_str = ''
    wait_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        wait_str = ' wait {timeout}'.format(timeout=timeout)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
        destination=destination,
        source=source_str,
        maxttl=maxttl_str,
        wait=wait_str,
        vrf=vrf_str
    )
    traceroute_rpc = E('command', traceroute_command)
    rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    traceroute_results = rpc_reply.find('.//traceroute-results')
    traceroute_failure = napalm_base.helpers.find_txt(
        traceroute_results, 'traceroute-failure', '')
    error_message = napalm_base.helpers.find_txt(
        traceroute_results, 'rpc-error/error-message', '')
    if traceroute_failure and error_message:
        return {'error': '{}: {}'.format(traceroute_failure, error_message)}
    traceroute_result['success'] = {}
    for hop in traceroute_results.findall('hop'):
        ttl_value = napalm_base.helpers.convert(
            int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
        if ttl_value not in traceroute_result['success']:
            traceroute_result['success'][ttl_value] = {'probes': {}}
        for probe in hop.findall('probe-result'):
            probe_index = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
            host_name = py23_compat.text_type(
                napalm_base.helpers.find_txt(probe, 'host-name', '*'))
            # rtt is reported in microseconds; convert to milliseconds
            rtt = napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3  # ms
            traceroute_result['success'][ttl_value]['probes'][probe_index] = {
                'ip_address': ip_address,
                'host_name': host_name,
                'rtt': rtt
            }
    return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
         timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
    """Execute ping on the device and return the parsed results.

    Returns {'success': {...}} with RTT statistics and per-probe results,
    or {'error': message} when the RPC fails or all probes are lost.
    """
    ping_dict = {}
    # build the optional CLI argument strings
    source_str = ''
    maxttl_str = ''
    timeout_str = ''
    size_str = ''
    count_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        timeout_str = ' wait {timeout}'.format(timeout=timeout)
    if size:
        size_str = ' size {size}'.format(size=size)
    if count:
        count_str = ' count {count}'.format(count=count)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
        destination=destination,
        source=source_str,
        ttl=maxttl_str,
        timeout=timeout_str,
        size=size_str,
        count=count_str,
        vrf=vrf_str
    )
    ping_rpc = E('command', ping_command)
    rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    probe_summary = rpc_reply.find('.//probe-results-summary')
    if probe_summary is None:
        rpc_error = rpc_reply.find('.//rpc-error')
        return {'error': '{}'.format(
            napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
    packet_loss = napalm_base.helpers.convert(
        int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
    # rtt values are valid only if we got at least one ICMP reply.
    # Fixed: the previous `packet_loss is not 100` compared identity of an
    # int literal, which is implementation-dependent; use `!=`.
    if packet_loss != 100:
        ping_dict['success'] = {}
        ping_dict['success']['probes_sent'] = int(
            probe_summary.findtext("probes-sent"))
        ping_dict['success']['packet_loss'] = packet_loss
        # RTT values come back in microseconds; convert to milliseconds
        ping_dict['success'].update({
            'rtt_min': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
            'rtt_max': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
            'rtt_avg': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-average'), -1) * 1e-3), 3),
            'rtt_stddev': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
        })
        ping_results = rpc_reply.find('.//ping-results')
        results_array = []
        for probe_result in ping_results.findall('probe-result'):
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip,
                napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
            rtt = round(
                (napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(
                        probe_result, 'rtt'), -1) * 1e-3), 3)
            results_array.append({'ip_address': ip_address,
                                  'rtt': rtt})
        ping_dict['success'].update({'results': results_array})
    else:
        return {'error': 'Packet loss {}'.format(packet_loss)}
    return ping_dict
def get_users(self):
    """Return the configuration of the users, keyed by username."""
    # JunOS login class -> Cisco-style privilege level
    _CLASS_TO_LEVEL = {
        'super-user': 15,
        'superuser': 15,
        'operator': 5,
        'read-only': 1,
        'unauthorized': 0
    }
    _DEFAULTS = {
        'level': 0,
        'password': '',
        'sshkeys': []
    }
    users_view = junos_views.junos_users_table(self.device)
    users_view.get()
    users = {}
    for username, fields in users_view.items():
        details = _DEFAULTS.copy()
        # only keep fields that actually carry a value
        details.update({f[0]: f[1] for f in fields if f[1]})
        user_class = details.pop('class', '')
        # stringify everything before re-applying the typed fields below
        details = {
            key: py23_compat.text_type(value)
            for key, value in details.items()
        }
        details.update({
            'level': _CLASS_TO_LEVEL.get(user_class, 0)
        })
        # collapse the per-algorithm key fields into one list
        details['sshkeys'] = [
            details.pop(key_field)
            for key_field in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
            if details.get(key_field, '')
        ]
        users[username] = details
    return users
def get_optics(self):
    """Return optics information.

    Structure follows the OpenConfig-ish NAPALM model:
    {interface: {'physical_channels': {'channel': [per-lane dicts]}}}.
    """
    optics_table = junos_views.junos_intf_optics_table(self.device)
    optics_table.get()
    optics_items = optics_table.items()
    # optics_items has no lane information, so we need to re-format data
    # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
    # but the information for 40G/100G is incorrect at this point
    # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
    optics_items_with_lane = []
    for intf_optic_item in optics_items:
        temp_list = list(intf_optic_item)
        temp_list.insert(1, u"0")
        new_intf_optic_item = tuple(temp_list)
        optics_items_with_lane.append(new_intf_optic_item)
    # Now optics_items_with_lane has all optics with lane 0 included
    # Example: ('xe-0/0/0', u'0', [ optical_values ])
    # Get optical information for 40G/100G optics
    optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
    optics_table40G.get()
    optics_40Gitems = optics_table40G.items()
    # Re-format data as before inserting lane value
    new_optics_40Gitems = []
    for item in optics_40Gitems:
        lane = item[0]
        iface = item[1].pop(0)
        new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
    # New_optics_40Gitems contains 40G/100G optics only:
    # ('et-0/0/49', u'0', [ optical_values ]),
    # ('et-0/0/49', u'1', [ optical_values ]),
    # ('et-0/0/49', u'2', [ optical_values ])
    # Remove 40G/100G optics entries with wrong information returned
    # from junos_intf_optics_table()
    iface_40G = [item[0] for item in new_optics_40Gitems]
    for intf_optic_item in optics_items_with_lane:
        iface_name = intf_optic_item[0]
        if iface_name not in iface_40G:
            new_optics_40Gitems.append(intf_optic_item)
    # New_optics_40Gitems contains all optics 10G/40G/100G with the lane
    optics_detail = {}
    for intf_optic_item in new_optics_40Gitems:
        lane = intf_optic_item[1]
        interface_name = py23_compat.text_type(intf_optic_item[0])
        optics = dict(intf_optic_item[2])
        if interface_name not in optics_detail:
            # first lane for this interface: create the channel skeleton
            optics_detail[interface_name] = {}
            optics_detail[interface_name]['physical_channels'] = {}
            optics_detail[interface_name]['physical_channels']['channel'] = []
        # Defaulting avg, min, max values to 0.0 since device does not
        # return these values
        intf_optics = {
            'index': int(lane),
            'state': {
                'input_power': {
                    'instant': (
                        float(optics['input_power'])
                        if optics['input_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'output_power': {
                    'instant': (
                        float(optics['output_power'])
                        if optics['output_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'laser_bias_current': {
                    'instant': (
                        float(optics['laser_bias_current'])
                        if optics['laser_bias_current'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                }
            }
        }
        optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
    return optics_detail
def get_config(self, retrieve='all'):
    """Return the requested configuration(s) as text.

    'startup' is always empty: JunOS has no separate startup database.
    """
    def _fetch(database):
        # one get-config RPC against the requested database, text format
        config = self.device.rpc.get_config(
            filter_xml=None, options={'format': 'text', 'database': database})
        return py23_compat.text_type(config.text)

    rv = {
        'startup': '',
        'running': '',
        'candidate': ''
    }
    if retrieve in ('candidate', 'all'):
        rv['candidate'] = _fetch('candidate')
    if retrieve in ('running', 'all'):
        rv['running'] = _fetch('committed')
    return rv
def get_network_instances(self, name=''):
    """Return the network instances (routing instances / VRFs).

    Includes a synthetic 'default' (global) instance holding every
    interface not bound to an explicit instance. If ``name`` is given,
    only that instance is returned ({} when unknown).
    """
    network_instances = {}
    ri_table = junos_views.junos_nw_instances_table(self.device)
    ri_table.get()
    ri_entries = ri_table.items()
    vrf_interfaces = []
    for ri_entry in ri_entries:
        ri_name = py23_compat.text_type(ri_entry[0])
        ri_details = {
            d[0]: d[1] for d in ri_entry[1]
        }
        ri_type = ri_details['instance_type']
        if ri_type is None:
            ri_type = 'default'
        ri_rd = ri_details['route_distinguisher']
        ri_interfaces = ri_details['interfaces']
        # a single interface comes back as a scalar, not a list
        if not isinstance(ri_interfaces, list):
            ri_interfaces = [ri_interfaces]
        network_instances[ri_name] = {
            'name': ri_name,
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
            'state': {
                'route_distinguisher': ri_rd if ri_rd else ''
            },
            'interfaces': {
                'interface': {
                    intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                }
            }
        }
        vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
    # everything not claimed by an explicit instance belongs to 'default'
    all_interfaces = self.get_interfaces().keys()
    default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
    if 'default' not in network_instances:
        network_instances['default'] = {
            'name': 'default',
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
            'state': {
                'route_distinguisher': ''
            },
            'interfaces': {
                'interface': {
                    py23_compat.text_type(intrf_name): {}
                    for intrf_name in default_interfaces
                }
            }
        }
    if not name:
        return network_instances
    if name not in network_instances:
        return {}
    return {name: network_instances[name]}
|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver.get_lldp_neighbors_detail | python | def get_lldp_neighbors_detail(self, interface=''):
lldp_neighbors = {}
lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
try:
lldp_table.get()
except RpcError as rpcerr:
# this assumes the library runs in an environment
# able to handle logs
# otherwise, the user just won't see this happening
log.error('Unable to retrieve the LLDP neighbors information:')
log.error(rpcerr.message)
return {}
interfaces = lldp_table.get().keys()
# get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
# and SRX Series is get-lldp-interface-neighbors-information,
# and rpc for M, MX, and T Series is get-lldp-interface-neighbors
# ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
# ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
for interface in interfaces:
if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
lldp_table.get(interface_name=interface)
else:
lldp_table.get(interface_device=interface)
for item in lldp_table:
if interface not in lldp_neighbors.keys():
lldp_neighbors[interface] = []
lldp_neighbors[interface].append({
'parent_interface': item.parent_interface,
'remote_port': item.remote_port,
'remote_chassis_id': napalm_base.helpers.convert(
napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
'remote_port_description': napalm_base.helpers.convert(
py23_compat.text_type, item.remote_port_description),
'remote_system_name': item.remote_system_name,
'remote_system_description': item.remote_system_description,
'remote_system_capab': item.remote_system_capab,
'remote_system_enable_capab': item.remote_system_enable_capab
})
return lldp_neighbors | Detailed view of the LLDP neighbors. | train | https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L652-L698 | null | class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
    """
    Initialise JunOS driver.

    :param hostname: device IP address or FQDN
    :param username: username to authenticate with
    :param password: password (ignored for key auth when key_file is set)
    :param timeout: RPC timeout in seconds (default: 60)

    Optional args:
        * config_lock (True/False): lock configuration DB after the connection is established.
        * port (int): custom port
        * key_file (string): SSH key file path
        * keepalive (int): Keepalive interval
        * ignore_warning (boolean): not generate warning exceptions
    """
    self.hostname = hostname
    self.username = username
    self.password = password
    self.timeout = timeout
    self.config_replace = False
    self.locked = False

    # Get optional arguments
    if optional_args is None:
        optional_args = {}

    self.config_lock = optional_args.get('config_lock', False)
    self.port = optional_args.get('port', 22)
    self.key_file = optional_args.get('key_file', None)
    self.keepalive = optional_args.get('keepalive', 30)
    self.ssh_config_file = optional_args.get('ssh_config_file', None)
    self.ignore_warning = optional_args.get('ignore_warning', False)

    # key-based authentication takes precedence when a key file is given
    if self.key_file:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             ssh_private_key_file=self.key_file,
                             ssh_config=self.ssh_config_file,
                             port=self.port)
    else:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             port=self.port,
                             ssh_config=self.ssh_config_file)
    self.profile = ["junos"]
def open(self):
    """Open the connection with the device."""
    try:
        self.device.open()
    except ConnectTimeoutError as cte:
        # NOTE(review): Python 3 exceptions have no generic '.message'
        # attribute — confirm PyEZ's ConnectTimeoutError provides it.
        raise ConnectionException(cte.message)
    self.device.timeout = self.timeout
    # keep the underlying SSH transport alive across long-running RPCs
    self.device._conn._session.transport.set_keepalive(self.keepalive)
    if hasattr(self.device, "cu"):
        # make sure to remove the cu attr from previous session
        # ValueError: requested attribute name cu already exists
        del self.device.cu
    self.device.bind(cu=Config)
    if self.config_lock:
        self._lock()
def close(self):
    """Close the connection."""
    # release the config DB lock taken at open() time before tearing down
    if self.config_lock:
        self._unlock()
    self.device.close()
def _lock(self):
    """Acquire the configuration DB lock, unless already held."""
    if self.locked:
        return
    self.device.cu.lock()
    self.locked = True
def _unlock(self):
    """Release the configuration DB lock, if held."""
    if not self.locked:
        return
    self.device.cu.unlock()
    self.locked = False
def _rpc(self, get, child=None, **kwargs):
    """
    Build and execute an arbitrary RPC, returning the serialized reply.

    Examples:
        Configuration:          get: "<get-configuration/>"
        Interface information:  get: "<get-interface-information/>"
        A particular interface's information:
            get:   "<get-interface-information/>"
            child: "<interface-name>ge-0/0/0</interface-name>"
    """
    request = etree.fromstring(get)
    if child:
        # nest the child element inside the main request
        request.append(etree.fromstring(child))
    reply = self.device.execute(request)
    return etree.tostring(reply)
def is_alive(self):
# evaluate the state of the underlying SSH connection
# and also the NETCONF status from PyEZ
return {
'is_alive': self.device._conn._session.transport.is_active() and self.device.connected
}
@staticmethod
def _is_json_format(config):
try:
_ = json.loads(config) # noqa
except (TypeError, ValueError):
return False
return True
def _detect_config_format(self, config):
fmt = 'text'
set_action_matches = [
'set',
'activate',
'deactivate',
'annotate',
'copy',
'delete',
'insert',
'protect',
'rename',
'unprotect',
]
if config.strip().startswith('<'):
return 'xml'
elif config.strip().split(' ')[0] in set_action_matches:
return 'set'
elif self._is_json_format(config):
return 'json'
return fmt
    def _load_candidate(self, filename, config, overwrite):
        """
        Load a configuration into the candidate DB.

        :param filename: path to a file containing the configuration, or None
        :param config: configuration text, used when ``filename`` is None
        :param overwrite: True to replace the full config, False to merge
        :raises ReplaceConfigException: load failed during a replace operation
        :raises MergeConfigException: load failed during a merge operation
        """
        if filename is None:
            configuration = config
        else:
            with open(filename) as f:
                configuration = f.read()
        if not self.config_lock:
            # if not locked during connection time
            # will try to lock it if not already acquired
            self._lock()
            # and the device will be locked till first commit/rollback
        try:
            fmt = self._detect_config_format(configuration)
            if fmt == "xml":
                # PyEZ expects an lxml element, not a string, for XML payloads
                configuration = etree.XML(configuration)
            self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                                ignore_warning=self.ignore_warning)
        except ConfigLoadError as e:
            # re-raise as the NAPALM exception matching the pending operation
            if self.config_replace:
                raise ReplaceConfigException(e.errs)
            else:
                raise MergeConfigException(e.errs)
    def load_replace_candidate(self, filename=None, config=None):
        """Open the candidate config and replace (load with overwrite)."""
        self.config_replace = True
        self._load_candidate(filename, config, True)
    def load_merge_candidate(self, filename=None, config=None):
        """Open the candidate config and merge (load without overwrite)."""
        self.config_replace = False
        self._load_candidate(filename, config, False)
def compare_config(self):
"""Compare candidate config with running."""
diff = self.device.cu.diff()
if diff is None:
return ''
else:
return diff.strip()
def commit_config(self):
"""Commit configuration."""
self.device.cu.commit(ignore_warning=self.ignore_warning)
if not self.config_lock:
self._unlock()
def discard_config(self):
"""Discard changes (rollback 0)."""
self.device.cu.rollback(rb_id=0)
if not self.config_lock:
self._unlock()
def rollback(self):
"""Rollback to previous commit."""
self.device.cu.rollback(rb_id=1)
self.commit_config()
def get_facts(self):
"""Return facts of the device."""
output = self.device.facts
uptime = self.device.uptime or -1
interfaces = junos_views.junos_iface_table(self.device)
interfaces.get()
interface_list = interfaces.keys()
return {
'vendor': u'Juniper',
'model': py23_compat.text_type(output['model']),
'serial_number': py23_compat.text_type(output['serialnumber']),
'os_version': py23_compat.text_type(output['version']),
'hostname': py23_compat.text_type(output['hostname']),
'fqdn': py23_compat.text_type(output['fqdn']),
'uptime': uptime,
'interface_list': interface_list
}
    def get_interfaces(self):
        """
        Return interface details keyed by interface name.

        Each entry carries is_up/is_enabled flags, description, last_flapped
        (seconds, -1 when unknown), MAC address and speed in Mbps (-1 when the
        speed cannot be parsed).
        """
        result = {}
        interfaces = junos_views.junos_iface_table(self.device)
        interfaces.get()
        # convert all the tuples to our pre-defined dict structure
        for iface in interfaces.keys():
            result[iface] = {
                'is_up': interfaces[iface]['is_up'],
                'is_enabled': interfaces[iface]['is_enabled'],
                'description': (interfaces[iface]['description'] or u''),
                'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
                'mac_address': napalm_base.helpers.convert(
                    napalm_base.helpers.mac,
                    interfaces[iface]['mac_address'],
                    py23_compat.text_type(interfaces[iface]['mac_address'])),
                'speed': -1
            }
            # Parse the speed string, e.g. '1000mbps' or '10Gbps'; leave -1
            # when the field is empty or not numeric.
            match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
            if match is None:
                continue
            speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
            if speed_value == -1:
                continue
            speed_unit = match.group(2)
            if speed_unit.lower() == 'gbps':
                # normalise to Mbps
                speed_value *= 1000
            result[iface]['speed'] = speed_value
        return result
def get_interfaces_counters(self):
"""Return interfaces counters."""
query = junos_views.junos_iface_counter_table(self.device)
query.get()
interface_counters = {}
for interface, counters in query.items():
interface_counters[interface] = {k: v if v is not None else -1 for k, v in counters}
return interface_counters
    def get_environment(self):
        """
        Return environment details: power, fans, temperature, CPU and memory.

        Parses the environment table (whose rows are grouped by a 'class'
        label that Junos only prints on the first row of each group), the
        routing-engine table for CPU/memory, the temperature thresholds and,
        where supported, the PEM table to fill real power capacity/output.
        """
        environment = junos_views.junos_enviroment_table(self.device)
        routing_engine = junos_views.junos_routing_engine_table(self.device)
        temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
        power_supplies = junos_views.junos_pem_table(self.device)
        environment.get()
        routing_engine.get()
        temperature_thresholds.get()
        environment_data = {}
        # carries the last explicit 'class' label across unlabeled rows
        current_class = None
        for sensor_object, object_data in environment.items():
            structured_object_data = {k: v for k, v in object_data}
            if structured_object_data['class']:
                # If current object has a 'class' defined, store it for use
                # on subsequent unlabeled lines.
                current_class = structured_object_data['class']
            else:
                # Juniper doesn't label the 2nd+ lines of a given class with a
                # class name. In that case, we use the most recent class seen.
                structured_object_data['class'] = current_class
            if structured_object_data['class'] == 'Power':
                # Create a dict for the 'power' key (EAFP: KeyError on first hit)
                try:
                    environment_data['power'][sensor_object] = {}
                except KeyError:
                    environment_data['power'] = {}
                    environment_data['power'][sensor_object] = {}
                # -1.0 means "unknown"; corrected below from the PEM table
                environment_data['power'][sensor_object]['capacity'] = -1.0
                environment_data['power'][sensor_object]['output'] = -1.0
            if structured_object_data['class'] == 'Fans':
                # Create a dict for the 'fans' key
                try:
                    environment_data['fans'][sensor_object] = {}
                except KeyError:
                    environment_data['fans'] = {}
                    environment_data['fans'][sensor_object] = {}
            status = structured_object_data['status']
            env_class = structured_object_data['class']
            if (status == 'OK' and env_class == 'Power'):
                # If status is Failed, Absent or Testing, set status to False.
                environment_data['power'][sensor_object]['status'] = True
            elif (status != 'OK' and env_class == 'Power'):
                environment_data['power'][sensor_object]['status'] = False
            elif (status == 'OK' and env_class == 'Fans'):
                # If status is Failed, Absent or Testing, set status to False.
                environment_data['fans'][sensor_object]['status'] = True
            elif (status != 'OK' and env_class == 'Fans'):
                environment_data['fans'][sensor_object]['status'] = False
            # NOTE(review): this inner loop runs for every environment row and
            # keys on the OUTER sensor_object, re-writing the same temperature
            # entry once per threshold row -- looks redundant; confirm intent.
            for temperature_object, temperature_data in temperature_thresholds.items():
                structured_temperature_data = {k: v for k, v in temperature_data}
                if structured_object_data['class'] == 'Temp':
                    # Create a dict for the 'temperature' key
                    try:
                        environment_data['temperature'][sensor_object] = {}
                    except KeyError:
                        environment_data['temperature'] = {}
                        environment_data['temperature'][sensor_object] = {}
                    # Check we have a temperature field in this class (See #66)
                    if structured_object_data['temperature']:
                        environment_data['temperature'][sensor_object]['temperature'] = \
                            float(structured_object_data['temperature'])
                    # Set a default value (False) to the key is_critical and is_alert
                    environment_data['temperature'][sensor_object]['is_alert'] = False
                    environment_data['temperature'][sensor_object]['is_critical'] = False
                    # Check if the working temperature is equal to or higher than alerting threshold
                    temp = structured_object_data['temperature']
                    if structured_temperature_data['red-alarm'] <= temp:
                        environment_data['temperature'][sensor_object]['is_critical'] = True
                        environment_data['temperature'][sensor_object]['is_alert'] = True
                    elif structured_temperature_data['yellow-alarm'] <= temp:
                        environment_data['temperature'][sensor_object]['is_alert'] = True
        # Try to correct Power Supply information
        pem_table = dict()
        try:
            power_supplies.get()
        except RpcError:
            # Not all platforms have support for this
            pass
        else:
            # Format PEM information and correct capacity and output values
            for pem in power_supplies.items():
                pem_name = pem[0].replace("PEM", "Power Supply")
                pem_table[pem_name] = dict(pem[1])
                environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
                environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
        for routing_engine_object, routing_engine_data in routing_engine.items():
            structured_routing_engine_data = {k: v for k, v in routing_engine_data}
            # Create dicts for 'cpu' and 'memory'.
            try:
                environment_data['cpu'][routing_engine_object] = {}
                environment_data['memory'] = {}
            except KeyError:
                environment_data['cpu'] = {}
                environment_data['cpu'][routing_engine_object] = {}
                environment_data['memory'] = {}
            # Calculate the CPU usage by using the CPU idle value.
            environment_data['cpu'][routing_engine_object]['%usage'] = \
                100.0 - structured_routing_engine_data['cpu-idle']
            try:
                environment_data['memory']['available_ram'] = \
                    int(structured_routing_engine_data['memory-dram-size'])
            except ValueError:
                # some platforms report e.g. '2048 MB' -- keep only the digits
                environment_data['memory']['available_ram'] = \
                    int(
                        ''.join(
                            i for i in structured_routing_engine_data['memory-dram-size']
                            if i.isdigit()
                        )
                    )
            # Junos gives us RAM in %, so calculation has to be made.
            # Sadly, because of this, results are not 100% accurate to the truth.
            environment_data['memory']['used_ram'] = \
                int(round(environment_data['memory']['available_ram'] / 100.0 *
                          structured_routing_engine_data['memory-buffer-utilization']))
        return environment_data
@staticmethod
def _get_address_family(table):
"""
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
"""
address_family_mapping = {
'inet': 'ipv4',
'inet6': 'ipv6',
'inetflow': 'flow'
}
family = table.split('.')[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = family
return address_family
    def _parse_route_stats(self, neighbor):
        """
        Build the per-address-family prefix statistics for a BGP neighbor.

        :param neighbor: dict of raw neighbor fields ('is_up', 'tables',
            'received_prefixes', 'accepted_prefixes', 'sent_prefixes',
            'send-state'); NOTE: 'sent_prefixes' may be mutated (pop) here.
        :returns: dict keyed by address family with received/accepted/sent
            prefix counters; -1 placeholders when the session is down.
        """
        data = {
            'ipv4': {
                'received_prefixes': -1,
                'accepted_prefixes': -1,
                'sent_prefixes': -1
            },
            'ipv6': {
                'received_prefixes': -1,
                'accepted_prefixes': -1,
                'sent_prefixes': -1
            }
        }
        if not neighbor['is_up']:
            # session down: keep the -1 placeholders
            return data
        elif isinstance(neighbor['tables'], list):
            if isinstance(neighbor['sent_prefixes'], int):
                # We expect sent_prefixes to be a list, but sometimes it
                # is of type int. Therefore convert attribute to list
                neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
            for idx, table in enumerate(neighbor['tables']):
                family = self._get_address_family(table)
                data[family] = {}
                data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
                data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
                # sent counters are only reported for RIBs that are 'in sync';
                # consume the list in order so counters line up with tables
                if 'in sync' in neighbor['send-state'][idx]:
                    data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
                else:
                    data[family]['sent_prefixes'] = 0
        else:
            # single table: the counter fields are scalars, not lists
            family = self._get_address_family(neighbor['tables'])
            data[family] = {}
            data[family]['received_prefixes'] = neighbor['received_prefixes']
            data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
            data[family]['sent_prefixes'] = neighbor['sent_prefixes']
        return data
@staticmethod
def _parse_value(value):
if isinstance(value, py23_compat.string_types):
return py23_compat.text_type(value)
elif value is None:
return u''
else:
return value
    def get_bgp_neighbors(self):
        """
        Return BGP neighbors details, grouped by routing instance then peer IP.

        Iterates the routing instances and queries the BGP neighbor and
        uptime tables once per instance; instances with no BGP peers are
        filtered from the result.
        """
        bgp_neighbor_data = {}
        default_neighbor_details = {
            'local_as': 0,
            'remote_as': 0,
            'remote_id': '',
            'is_up': False,
            'is_enabled': False,
            'description': '',
            'uptime': 0,
            'address_family': {}
        }
        # only fields listed here make it into the per-peer output dict
        keys = default_neighbor_details.keys()
        uptime_table = junos_views.junos_bgp_uptime_table(self.device)
        bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
        # per-instance cache so the uptime RPC runs at most once per instance
        uptime_table_lookup = {}

        def _get_uptime_table(instance):
            # memoised fetch of the uptime table for one routing instance
            if instance not in uptime_table_lookup:
                uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
            return uptime_table_lookup[instance]

        def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
            '''
            Make sure to execute a simple request whenever using
            junos > 13. This is a helper used to avoid code redundancy
            and reuse the function also when iterating through the list
            BGP neighbors under a specific routing instance,
            also when the device is capable to return the routing
            instance name at the BGP neighbor level.
            '''
            for bgp_neighbor in neighbor_data:
                # key looks like '1.2.3.4+179' -- strip the port suffix
                peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
                neighbor_details = deepcopy(default_neighbor_details)
                neighbor_details.update(
                    {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
                )
                if not instance:
                    # not instance, means newer Junos version,
                    # as we request everything in a single request
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                    instance = peer_fwd_rti
                else:
                    # instance is explicitly requested,
                    # thus it's an old Junos, so we retrieve the BGP neighbors
                    # under a certain routing instance
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
                instance_name = 'global' if instance == 'master' else instance
                if instance_name not in bgp_neighbor_data:
                    bgp_neighbor_data[instance_name] = {}
                if 'router_id' not in bgp_neighbor_data[instance_name]:
                    # we only need to set this once
                    bgp_neighbor_data[instance_name]['router_id'] = \
                        py23_compat.text_type(neighbor_details.get('local_id', ''))
                peer = {
                    key: self._parse_value(value)
                    for key, value in neighbor_details.items()
                    if key in keys
                }
                peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
                peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
                peer['address_family'] = self._parse_route_stats(neighbor_details)
                if 'peers' not in bgp_neighbor_data[instance_name]:
                    bgp_neighbor_data[instance_name]['peers'] = {}
                bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
                if not uptime_table_items:
                    uptime_table_items = _get_uptime_table(instance)
                # graft uptime values onto the matching peers
                for neighbor, uptime in uptime_table_items:
                    if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                        bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                    bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]
        # Commenting out the following sections, till Junos
        # will provide a way to identify the routing instance name
        # from the details of the BGP neighbor
        # currently, there are Junos 15 version having a field called `peer_fwd_rti`
        # but unfortunately, this is not consistent.
        # Junos 17 might have this fixed, but this needs to be revisited later.
        # In the definition below, `old_junos` means a version that does not provide
        # the forwarding RTI information.
        #
        # old_junos = napalm_base.helpers.convert(
        #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
        # if old_junos:
        instances = junos_views.junos_route_instance_table(self.device).get()
        for instance, instance_data in instances.items():
            if instance.startswith('__'):
                # junos internal instances
                continue
            bgp_neighbor_data[instance] = {'peers': {}}
            instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
            uptime_table_items = uptime_table.get(instance=instance).items()
            _get_bgp_neighbors_core(instance_neighbors,
                                    instance=instance,
                                    uptime_table_items=uptime_table_items)
        # If the OS provides the `peer_fwd_rti` or any way to identify the
        # routing instance name (see above), the performance of this getter
        # can be significantly improved, as we won't execute one request
        # for each and every RT.
        # However, this improvement would only be beneficial for multi-VRF envs.
        #
        # else:
        #     instance_neighbors = bgp_neighbors_table.get().items()
        #     _get_bgp_neighbors_core(instance_neighbors)
        # drop instances that turned out to have no BGP peers at all
        bgp_tmp_dict = {}
        for k, v in bgp_neighbor_data.items():
            if bgp_neighbor_data[k]['peers']:
                bgp_tmp_dict[k] = v
        return bgp_tmp_dict
def get_lldp_neighbors(self):
"""Return LLDP neighbors details."""
lldp = junos_views.junos_lldp_table(self.device)
try:
lldp.get()
except RpcError as rpcerr:
# this assumes the library runs in an environment
# able to handle logs
# otherwise, the user just won't see this happening
log.error('Unable to retrieve the LLDP neighbors information:')
log.error(rpcerr.message)
return {}
result = lldp.items()
neighbors = {}
for neigh in result:
if neigh[0] not in neighbors.keys():
neighbors[neigh[0]] = []
neighbors[neigh[0]].append({x[0]: py23_compat.text_type(x[1]) for x in neigh[1]})
return neighbors
    def cli(self, commands):
        """
        Execute raw CLI commands and return their output.

        Each command may carry Junos-style pipe modifiers (match, except,
        last, trim, count, find). The 'save' modifier is stripped before the
        command is sent; when the device does not evaluate the pipes itself
        (raw output comes back as None), they are emulated locally.

        :param commands: list of CLI command strings
        :returns: dict mapping each original command string to its output
        :raises TypeError: when ``commands`` is not a list
        """
        cli_output = {}

        def _count(txt, none):  # Second arg for consistency only. noqa
            '''
            Return the exact output, as Junos displays
            e.g.:
            > show system processes extensive | match root | count
            Count: 113 lines
            '''
            count = len(txt.splitlines())
            return 'Count: {count} lines'.format(count=count)

        def _trim(txt, length):
            '''
            Trim specified number of columns from start of line.
            '''
            try:
                newlines = []
                for line in txt.splitlines():
                    newlines.append(line[int(length):])
                return '\n'.join(newlines)
            except ValueError:
                # non-numeric length: return the text untouched
                return txt

        def _except(txt, pattern):
            '''
            Show only text that does not match a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            unmatched = [
                line for line in txt.splitlines()
                if not re.search(rgx, line, re.I)
            ]
            return '\n'.join(unmatched)

        def _last(txt, length):
            '''
            Display end of output only.
            '''
            try:
                return '\n'.join(
                    txt.splitlines()[(-1)*int(length):]
                )
            except ValueError:
                return txt

        def _match(txt, pattern):
            '''
            Show only text that matches a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            matched = [
                line for line in txt.splitlines()
                if re.search(rgx, line, re.I)
            ]
            return '\n'.join(matched)

        def _find(txt, pattern):
            '''
            Search for first occurrence of pattern.
            '''
            rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
            match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
            if match:
                return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
            else:
                return '\nPattern not found'

        def _process_pipe(cmd, txt):
            '''
            Process CLI output from Juniper device that
            doesn't allow piping the output.
            '''
            # NOTE(review): pipes are only emulated when the device returned
            # no output (txt is None) -- presumably the device evaluated the
            # pipes itself otherwise; confirm against PyEZ cli() semantics.
            if txt is not None:
                return txt
            _OF_MAP = OrderedDict()
            _OF_MAP['except'] = _except
            _OF_MAP['match'] = _match
            _OF_MAP['last'] = _last
            _OF_MAP['trim'] = _trim
            _OF_MAP['count'] = _count
            _OF_MAP['find'] = _find
            # the operations order matter in this case!
            exploded_cmd = cmd.split('|')
            pipe_oper_args = {}
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0]  # always there
                pipe_args = ''.join(exploded_pipe[1:2])
                # will not throw error when there's no arg
                pipe_oper_args[pipe_oper] = pipe_args
            for oper in _OF_MAP.keys():
                # to make sure the operation sequence is correct
                if oper not in pipe_oper_args.keys():
                    continue
                txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
            return txt

        if not isinstance(commands, list):
            raise TypeError('Please enter a valid list of commands!')
        # modifiers we refuse to forward to the device
        _PIPE_BLACKLIST = ['save']
        # Preprocessing to avoid forbidden commands
        for command in commands:
            exploded_cmd = command.split('|')
            command_safe_parts = []
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0]  # always there
                if pipe_oper in _PIPE_BLACKLIST:
                    continue
                pipe_args = ''.join(exploded_pipe[1:2])
                safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                                  args=pipe_args)
                command_safe_parts.append(safe_pipe)
            safe_command = exploded_cmd[0] if not command_safe_parts else\
                '{base} | {pipes}'.format(base=exploded_cmd[0],
                                          pipes=' | '.join(command_safe_parts))
            raw_txt = self.device.cli(safe_command, warning=False)
            # keyed by the ORIGINAL command string, not the sanitised one
            cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
                _process_pipe(command, raw_txt))
        return cli_output
    def get_bgp_config(self, group='', neighbor=''):
        """
        Return the BGP configuration, keyed by group name.

        :param group: restrict output to this BGP group, when set
        :param neighbor: restrict output to this neighbor (only honoured
            when ``group`` is also set)
        """
        def update_dict(d, u):  # for deep dictionary update
            for k, v in u.items():
                if isinstance(d, collections.Mapping):
                    if isinstance(v, collections.Mapping):
                        r = update_dict(d.get(k, {}), v)
                        d[k] = r
                    else:
                        d[k] = u[k]
                else:
                    d = {k: u[k]}
            return d

        def build_prefix_limit(**args):
            """
            Transform the elements of a dictionary into nested dictionaries.

            Example:
                {
                    'inet_unicast_limit': 500,
                    'inet_unicast_teardown_threshold': 95,
                    'inet_unicast_teardown_timeout': 5
                }
            becomes:
                {
                    'inet': {
                        'unicast': {
                            'limit': 500,
                            'teardown': {
                                'threshold': 95,
                                'timeout': 5
                            }
                        }
                    }
                }
            """
            prefix_limit = {}
            for key, value in args.items():
                # split 'inet_unicast_limit' into nesting levels, deepest last
                key_levels = key.split('_')
                length = len(key_levels)-1
                temp_dict = {
                    key_levels[length]: value
                }
                for index in reversed(range(length)):
                    level = key_levels[index]
                    temp_dict = {level: temp_dict}
                update_dict(prefix_limit, temp_dict)
            return prefix_limit

        # fields shared by both group-level and peer-level output
        _COMMON_FIELDS_DATATYPE_ = {
            'description': py23_compat.text_type,
            'local_address': py23_compat.text_type,
            'local_as': int,
            'remote_as': int,
            'import_policy': py23_compat.text_type,
            'export_policy': py23_compat.text_type,
            'inet_unicast_limit_prefix_limit': int,
            'inet_unicast_teardown_threshold_prefix_limit': int,
            'inet_unicast_teardown_timeout_prefix_limit': int,
            'inet_unicast_novalidate_prefix_limit': int,
            'inet_flow_limit_prefix_limit': int,
            'inet_flow_teardown_threshold_prefix_limit': int,
            'inet_flow_teardown_timeout_prefix_limit': int,
            'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
            'inet6_unicast_limit_prefix_limit': int,
            'inet6_unicast_teardown_threshold_prefix_limit': int,
            'inet6_unicast_teardown_timeout_prefix_limit': int,
            'inet6_unicast_novalidate_prefix_limit': int,
            'inet6_flow_limit_prefix_limit': int,
            'inet6_flow_teardown_threshold_prefix_limit': int,
            'inet6_flow_teardown_timeout_prefix_limit': int,
            'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
        }
        _PEER_FIELDS_DATATYPE_MAP_ = {
            'authentication_key': py23_compat.text_type,
            'route_reflector_client': bool,
            'nhs': bool
        }
        _PEER_FIELDS_DATATYPE_MAP_.update(
            _COMMON_FIELDS_DATATYPE_
        )
        _GROUP_FIELDS_DATATYPE_MAP_ = {
            'type': py23_compat.text_type,
            'apply_groups': list,
            'remove_private_as': bool,
            'multipath': bool,
            'multihop_ttl': int
        }
        _GROUP_FIELDS_DATATYPE_MAP_.update(
            _COMMON_FIELDS_DATATYPE_
        )
        # default value emitted for each datatype when the field is unset
        _DATATYPE_DEFAULT_ = {
            py23_compat.text_type: '',
            int: 0,
            bool: False,
            list: []
        }
        bgp_config = {}
        if group:
            bgp = junos_views.junos_bgp_config_group_table(self.device)
            bgp.get(group=group)
        else:
            bgp = junos_views.junos_bgp_config_table(self.device)
            bgp.get()
            neighbor = ''  # if no group is set, no neighbor should be set either
        bgp_items = bgp.items()
        if neighbor:
            neighbor_ip = napalm_base.helpers.ip(neighbor)
        for bgp_group in bgp_items:
            bgp_group_name = bgp_group[0]
            bgp_group_details = bgp_group[1]
            # seed the group entry with datatype defaults (prefix-limit
            # fields are handled separately below)
            bgp_config[bgp_group_name] = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                # skip prefix-limit fields and unset values
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    if isinstance(value, list):
                        # multiple policies are joined into one space-separated string
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                if key == 'neighbors':
                    # stash the neighbors sub-table; processed further below
                    bgp_group_peers = value
                    continue
                bgp_config[bgp_group_name].update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            if 'multihop' in bgp_config[bgp_group_name].keys():
                # Delete 'multihop' key from the output
                del bgp_config[bgp_group_name]['multihop']
                if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                    # Set ttl to default value 64
                    bgp_config[bgp_group_name]['multihop_ttl'] = 64
            bgp_config[bgp_group_name]['neighbors'] = {}
            # NOTE(review): if a group has no 'neighbors' field at all,
            # bgp_group_peers would be unset/stale here -- confirm upstream
            # tables always emit it.
            for bgp_group_neighbor in bgp_group_peers.items():
                bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
                if neighbor and bgp_peer_address != neighbor:
                    continue  # if filters applied, jump over all other neighbors
                bgp_group_details = bgp_group_neighbor[1]
                # seed the peer entry with datatype defaults
                bgp_peer_details = {
                    field: _DATATYPE_DEFAULT_.get(datatype)
                    for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                    if '_prefix_limit' not in field
                }
                for elem in bgp_group_details:
                    if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                        continue
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    key = elem[0]
                    value = elem[1]
                    if key in ['export_policy', 'import_policy']:
                        if isinstance(value, list):
                            value = ' '.join(value)
                    if key == 'local_address':
                        value = napalm_base.helpers.convert(
                            napalm_base.helpers.ip, value, value)
                    bgp_peer_details.update({
                        key: napalm_base.helpers.convert(datatype, value, default)
                    })
                    bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                        bgp_peer_details['local_as'])
                    bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                        bgp_peer_details['remote_as'])
                    if key == 'cluster':
                        # presence of a cluster-id implies route reflection
                        bgp_peer_details['route_reflector_client'] = True
                        # we do not want cluster in the output
                        del bgp_peer_details['cluster']
                if 'cluster' in bgp_config[bgp_group_name].keys():
                    # group-level cluster-id also marks peers as RR clients
                    bgp_peer_details['route_reflector_client'] = True
                prefix_limit_fields = {}
                for elem in bgp_group_details:
                    if '_prefix_limit' in elem[0] and elem[1] is not None:
                        datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                        default = _DATATYPE_DEFAULT_.get(datatype)
                        prefix_limit_fields.update({
                            elem[0].replace('_prefix_limit', ''):
                                napalm_base.helpers.convert(datatype, elem[1], default)
                        })
                bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
                bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
                if neighbor and bgp_peer_address == neighbor_ip:
                    break  # found the desired neighbor
            if 'cluster' in bgp_config[bgp_group_name].keys():
                # we do not want cluster in the output
                del bgp_config[bgp_group_name]['cluster']
        return bgp_config
    def get_bgp_neighbors_detail(self, neighbor_address=''):
        """
        Detailed view of the BGP neighbors operational data.

        :param neighbor_address: restrict output to this peer, when set
        :returns: dict keyed by routing instance name, then remote AS number,
            each holding a list of per-neighbor detail dicts.
        """
        bgp_neighbors = {}
        default_neighbor_details = {
            'up': False,
            'local_as': 0,
            'remote_as': 0,
            'router_id': u'',
            'local_address': u'',
            'routing_table': u'',
            'local_address_configured': False,
            'local_port': 0,
            'remote_address': u'',
            'remote_port': 0,
            'multihop': False,
            'multipath': False,
            'remove_private_as': False,
            'import_policy': u'',
            'export_policy': u'',
            'input_messages': -1,
            'output_messages': -1,
            'input_updates': -1,
            'output_updates': -1,
            'messages_queued_out': -1,
            'connection_state': u'',
            'previous_connection_state': u'',
            'last_event': u'',
            'suppress_4byte_as': False,
            'local_as_prepend': False,
            'holdtime': 0,
            'configured_holdtime': 0,
            'keepalive': 0,
            'configured_keepalive': 0,
            'active_prefix_count': -1,
            'received_prefix_count': -1,
            'accepted_prefix_count': -1,
            'suppressed_prefix_count': -1,
            'advertised_prefix_count': -1,
            'flap_count': 0
        }
        # maps tokens of the space-separated Junos 'options' string to
        # boolean output fields
        OPTION_KEY_MAP = {
            'RemovePrivateAS': 'remove_private_as',
            'Multipath': 'multipath',
            'Multihop': 'multihop',
            'AddressFamily': 'local_address_configured'
            # 'AuthKey' : 'authentication_key_set'
            # but other vendors do not specify if auth key is set
            # other options:
            # Preference, HoldTime, Ttl, LogUpDown, Refresh
        }

        def _bgp_iter_core(neighbor_data, instance=None):
            '''
            Iterate over a list of neighbors.
            For older junos, the routing instance is not specified inside the
            BGP neighbors XML, therefore we need to use a super sub-optimal structure
            as in get_bgp_neighbors: iterate through the list of network instances
            then execute one request for each and every routing instance.
            For newer junos, this is not necessary as the routing instance is available
            and we can get everything solve in a single request.
            '''
            for bgp_neighbor in neighbor_data:
                # table key is the remote AS number
                remote_as = int(bgp_neighbor[0])
                neighbor_details = deepcopy(default_neighbor_details)
                neighbor_details.update(
                    {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
                )
                if not instance:
                    # newer Junos: the neighbor record carries its routing instance
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                    instance = peer_fwd_rti
                else:
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
                instance_name = 'global' if instance == 'master' else instance
                options = neighbor_details.pop('options', '')
                if isinstance(options, str):
                    options_list = options.split()
                    for option in options_list:
                        key = OPTION_KEY_MAP.get(option)
                        if key is not None:
                            neighbor_details[key] = True
                four_byte_as = neighbor_details.pop('4byte_as', 0)
                # addresses come as 'ip+port'; 179 assumed when port missing
                local_address = neighbor_details.pop('local_address', '')
                local_details = local_address.split('+')
                neighbor_details['local_address'] = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, local_details[0], local_details[0])
                if len(local_details) == 2:
                    neighbor_details['local_port'] = int(local_details[1])
                else:
                    neighbor_details['local_port'] = 179
                neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
                peer_address = neighbor_details.pop('peer_address', '')
                remote_details = peer_address.split('+')
                neighbor_details['remote_address'] = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, remote_details[0], remote_details[0])
                if len(remote_details) == 2:
                    neighbor_details['remote_port'] = int(remote_details[1])
                else:
                    neighbor_details['remote_port'] = 179
                neighbor_details['routing_table'] = instance_name
                neighbor_details['local_as'] = napalm_base.helpers.as_number(
                    neighbor_details['local_as'])
                neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                    neighbor_details['remote_as'])
                neighbors_rib = neighbor_details.pop('rib')
                neighbors_queue = neighbor_details.pop('queue')
                # total the per-RIB output queue depths
                messages_queued_out = 0
                for queue_entry in neighbors_queue.items():
                    messages_queued_out += queue_entry[1][0][1]
                neighbor_details['messages_queued_out'] = messages_queued_out
                if instance_name not in bgp_neighbors.keys():
                    bgp_neighbors[instance_name] = {}
                if remote_as not in bgp_neighbors[instance_name].keys():
                    bgp_neighbors[instance_name][remote_as] = []
                neighbor_rib_stats = neighbors_rib.items()
                if not neighbor_rib_stats:
                    bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                    continue  # no RIBs available, pass default details
                # sum the prefix counters across all RIBs of this neighbor
                neighbor_rib_details = {
                    'active_prefix_count': 0,
                    'received_prefix_count': 0,
                    'accepted_prefix_count': 0,
                    'suppressed_prefix_count': 0,
                    'advertised_prefix_count': 0
                }
                for rib_entry in neighbor_rib_stats:
                    for elem in rib_entry[1]:
                        if elem[1] is None:
                            neighbor_rib_details[elem[0]] += 0
                        else:
                            neighbor_rib_details[elem[0]] += elem[1]
                neighbor_details.update(neighbor_rib_details)
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)
        # old_junos = napalm_base.helpers.convert(
        #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
        bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
        # if old_junos:
        # one RPC per routing instance (see _bgp_iter_core docstring)
        instances = junos_views.junos_route_instance_table(self.device)
        for instance, instance_data in instances.get().items():
            if instance.startswith('__'):
                # junos internal instances
                continue
            neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                    neighbor_address=str(neighbor_address)).items()
            _bgp_iter_core(neighbor_data, instance=instance)
        # else:
        #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
        #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
        #     _bgp_iter_core(neighbor_data)
        return bgp_neighbors
def get_arp_table(self):
"""Return the ARP table."""
# could use ArpTable
# from jnpr.junos.op.phyport import ArpTable
# and simply use it
# but
# we need:
# - filters
# - group by VLAN ID
# - hostname & TTE fields as well
arp_table = []
arp_table_raw = junos_views.junos_arp_table(self.device)
arp_table_raw.get()
arp_table_items = arp_table_raw.items()
for arp_table_entry in arp_table_items:
arp_entry = {
elem[0]: elem[1] for elem in arp_table_entry[1]
}
arp_entry['mac'] = napalm_base.helpers.mac(arp_entry.get('mac'))
arp_entry['ip'] = napalm_base.helpers.ip(arp_entry.get('ip'))
arp_table.append(arp_entry)
return arp_table
def get_ntp_peers(self):
"""Return the NTP peers configured on the device."""
ntp_table = junos_views.junos_ntp_peers_config_table(self.device)
ntp_table.get()
ntp_peers = ntp_table.items()
if not ntp_peers:
return {}
return {napalm_base.helpers.ip(peer[0]): {} for peer in ntp_peers}
def get_ntp_servers(self):
    """Return the NTP servers configured on the device, keyed by IP address."""
    servers_table = junos_views.junos_ntp_servers_config_table(self.device)
    servers_table.get()
    servers = servers_table.items()
    if servers:
        return {napalm_base.helpers.ip(address): {} for address, _ in servers}
    return {}
def get_ntp_stats(self):
    """Return NTP stats (associations).

    NTP associations have no structured XML RPC on JunOS, so the raw
    CLI output of 'show ntp associations' is parsed with a regex.
    """
    ntp_stats = []
    # Raw strings: '\s', '\.' etc. are regex escapes, not string
    # escapes -- without the r'' prefix they raise invalid-escape
    # warnings on modern Python.
    REGEX = (
        r'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
        r'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
        r'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
        r'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
        r'\s+([0-9\.]+)\s?$'
    )
    ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
    ntp_assoc_output_lines = ntp_assoc_output.splitlines()
    for ntp_assoc_output_line in ntp_assoc_output_lines[3:]:  # skip the 3 header lines
        line_search = re.search(REGEX, ntp_assoc_output_line, re.I)
        if not line_search:
            continue  # pattern not found
        line_groups = line_search.groups()
        try:
            ntp_stats.append({
                'remote': napalm_base.helpers.ip(line_groups[1]),
                'synchronized': (line_groups[0] == '*'),
                'referenceid': py23_compat.text_type(line_groups[2]),
                'stratum': int(line_groups[3]),
                'type': py23_compat.text_type(line_groups[4]),
                'when': py23_compat.text_type(line_groups[5]),
                'hostpoll': int(line_groups[6]),
                'reachability': int(line_groups[7]),
                'delay': float(line_groups[8]),
                'offset': float(line_groups[9]),
                'jitter': float(line_groups[10])
            })
        except Exception:
            continue  # malformed line -- jump to the next one
    return ntp_stats
def get_interfaces_ip(self):
    """Return the configured IP addresses, grouped by interface then family."""
    interfaces_ip = {}
    interface_table = junos_views.junos_ip_interfaces_table(self.device)
    interface_table.get()
    interface_table_items = interface_table.items()
    # Map JunOS family names to the NAPALM family names.
    _FAMILY_VMAP_ = {
        'inet': u'ipv4',
        'inet6': u'ipv6'
        # can add more mappings
    }
    # Fallback prefix length when the entry carries no mask.
    _FAMILY_MAX_PREFIXLEN = {
        'inet': 32,
        'inet6': 128
    }
    for interface_details in interface_table_items:
        # The table key is the "address/prefix" string.
        ip_network = interface_details[0]
        ip_address = ip_network.split('/')[0]
        address = napalm_base.helpers.convert(
            napalm_base.helpers.ip, ip_address, ip_address)
        try:
            interface_details_dict = dict(interface_details[1])
            family_raw = interface_details_dict.get('family')
            interface = py23_compat.text_type(interface_details_dict.get('interface'))
        except ValueError:
            continue
        prefix = napalm_base.helpers.convert(int,
                                             ip_network.split('/')[-1],
                                             _FAMILY_MAX_PREFIXLEN.get(family_raw))
        family = _FAMILY_VMAP_.get(family_raw)
        if not family or not interface:
            # Unsupported family (e.g. iso, mpls) or missing interface name.
            continue
        if interface not in interfaces_ip.keys():
            interfaces_ip[interface] = {}
        if family not in interfaces_ip[interface].keys():
            interfaces_ip[interface][family] = {}
        if address not in interfaces_ip[interface][family].keys():
            interfaces_ip[interface][family][address] = {}
        interfaces_ip[interface][family][address][u'prefix_length'] = prefix
    return interfaces_ip
def get_mac_address_table(self):
    """Return the MAC address table."""
    mac_address_table = []
    # Pick the table view matching the platform: EX/QFX switches expose
    # the table differently, and L2NG-style switches differently again.
    if self.device.facts.get('personality', '') in ['SWITCH']:  # for EX & QFX devices
        if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']:  # for L2NG devices
            mac_table = junos_views.junos_mac_address_table_switch_l2ng(self.device)
        else:
            mac_table = junos_views.junos_mac_address_table_switch(self.device)
    else:
        mac_table = junos_views.junos_mac_address_table(self.device)
    mac_table.get()
    mac_table_items = mac_table.items()
    # Defaults for fields the device may not report for every entry.
    default_values = {
        'mac': u'',
        'interface': u'',
        'vlan': 0,
        'static': False,
        'active': True,
        'moves': 0,
        'last_move': 0.0
    }
    for mac_table_entry in mac_table_items:
        mac_entry = default_values.copy()
        mac_entry.update(
            {elem[0]: elem[1] for elem in mac_table_entry[1]}
        )
        mac = mac_entry.get('mac')
        # JUNOS returns '*' for Type = Flood
        if mac == '*':
            continue
        mac_entry['mac'] = napalm_base.helpers.mac(mac)
        mac_address_table.append(mac_entry)
    return mac_address_table
def get_route_to(self, destination='', protocol=''):
    """Return route details to a specific destination, learned from a certain protocol."""
    routes = {}
    if not isinstance(destination, py23_compat.string_types):
        raise TypeError('Please specify a valid destination!')
    if protocol and isinstance(destination, py23_compat.string_types):
        protocol = protocol.lower()
    if protocol == 'connected':
        protocol = 'direct'  # this is how is called on JunOS
    _COMMON_PROTOCOL_FIELDS_ = [
        'destination',
        'prefix_length',
        'protocol',
        'current_active',
        'last_active',
        'age',
        'next_hop',
        'outgoing_interface',
        'selected_next_hop',
        'preference',
        'inactive_reason',
        'routing_table'
    ]  # identifies the list of fields common for all protocols
    _BOOLEAN_FIELDS_ = [
        'current_active',
        'selected_next_hop',
        'last_active'
    ]  # fields expected to have boolean values
    _PROTOCOL_SPECIFIC_FIELDS_ = {
        'bgp': [
            'local_as',
            'remote_as',
            'as_path',
            'communities',
            'local_preference',
            'preference2',
            'remote_address',
            'metric',
            'metric2'
        ],
        'isis': [
            'level',
            'metric',
            'local_as'
        ]
    }
    routes_table = junos_views.junos_protocol_route_table(self.device)
    rt_kargs = {
        'destination': destination
    }
    if protocol and isinstance(destination, py23_compat.string_types):
        rt_kargs['protocol'] = protocol
    try:
        routes_table.get(**rt_kargs)
    except RpcTimeoutError:
        # on devices with milions of routes
        # in case the destination is too generic (e.g.: 10/8)
        # will take very very long to determine all routes and
        # moreover will return a huge list
        raise CommandTimeoutException(
            'Too many routes returned! Please try with a longer prefix or a specific protocol!'
        )
    except RpcError as rpce:
        if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
            raise CommandErrorException(
                'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
        raise CommandErrorException(rpce)
    except Exception as err:
        raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))
    routes_items = routes_table.items()
    for route in routes_items:
        d = {}
        # next_hop = route[0]
        d = {elem[0]: elem[1] for elem in route[1]}
        destination = napalm_base.helpers.ip(d.pop('destination', ''))
        prefix_length = d.pop('prefix_length', 32)
        destination = '{d}/{p}'.format(
            d=destination,
            p=prefix_length
        )
        # Missing boolean fields default to False.
        d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
        as_path = d.get('as_path')
        if as_path is not None:
            d['as_path'] = as_path.split(' I ')[0]\
                .replace('AS path:', '')\
                .replace('I', '')\
                .strip()
            # to be sure that contains only AS Numbers
        if d.get('inactive_reason') is None:
            d['inactive_reason'] = u''
        # NOTE(review): assumes every entry carries a 'protocol' value;
        # .get() would return None here otherwise -- confirm against view.
        route_protocol = d.get('protocol').lower()
        if protocol and protocol != route_protocol:
            continue
        communities = d.get('communities')
        if communities is not None and type(communities) is not list:
            d['communities'] = [communities]
        d_keys = list(d.keys())
        # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
        all_protocol_attributes = {
            key: d.pop(key)
            for key in d_keys
            if key not in _COMMON_PROTOCOL_FIELDS_
        }
        protocol_attributes = {
            key: value for key, value in all_protocol_attributes.items()
            if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
        }
        d['protocol_attributes'] = protocol_attributes
        if destination not in routes.keys():
            routes[destination] = []
        routes[destination].append(d)
    return routes
def get_snmp_information(self):
    """Return the SNMP configuration (contact/location/chassis + communities)."""
    snmp_information = {}
    snmp_config = junos_views.junos_snmp_config_table(self.device)
    snmp_config.get()
    snmp_items = snmp_config.items()
    if not snmp_items:
        return snmp_information
    # Top-level SNMP fields; None values are normalised to ''.
    snmp_information = {
        py23_compat.text_type(ele[0]): ele[1] if ele[1] else ''
        for ele in snmp_items[0][1]
    }
    snmp_information['community'] = {}
    communities_table = snmp_information.pop('communities_table')
    if not communities_table:
        return snmp_information
    for community in communities_table.items():
        community_name = py23_compat.text_type(community[0])
        community_details = {
            'acl': ''
        }
        # The 'mode' field is translated through the authorization map.
        community_details.update({
            py23_compat.text_type(ele[0]): py23_compat.text_type(
                ele[1] if ele[0] != 'mode'
                else C.SNMP_AUTHORIZATION_MODE_MAP.get(ele[1]))
            for ele in community[1]
        })
        snmp_information['community'][community_name] = community_details
    return snmp_information
def get_probes_config(self):
    """Return the configuration of the RPM probes, keyed by probe then test."""
    probes = {}
    probes_table = junos_views.junos_rpm_probes_config_table(self.device)
    probes_table.get()
    probes_table_items = probes_table.items()
    for probe_test in probes_table_items:
        test_name = py23_compat.text_type(probe_test[0])
        test_details = {
            p[0]: p[1] for p in probe_test[1]
        }
        probe_name = napalm_base.helpers.convert(
            py23_compat.text_type, test_details.pop('probe_name'))
        target = napalm_base.helpers.convert(
            py23_compat.text_type, test_details.pop('target', ''))
        test_interval = napalm_base.helpers.convert(int, test_details.pop('test_interval', '0'))
        probe_count = napalm_base.helpers.convert(int, test_details.pop('probe_count', '0'))
        probe_type = napalm_base.helpers.convert(
            py23_compat.text_type, test_details.pop('probe_type', ''))
        source = napalm_base.helpers.convert(
            py23_compat.text_type, test_details.pop('source_address', ''))
    # A probe can host several tests; group tests under their probe.
        if probe_name not in probes.keys():
            probes[probe_name] = {}
        probes[probe_name][test_name] = {
            'probe_type': probe_type,
            'target': target,
            'source': source,
            'probe_count': probe_count,
            'test_interval': test_interval
        }
    return probes
def get_probes_results(self):
    """Return the results of the RPM probes, keyed by probe then test."""
    probes_results = {}
    probes_results_table = junos_views.junos_rpm_probes_results_table(self.device)
    probes_results_table.get()
    probes_results_items = probes_results_table.items()
    for probe_result in probes_results_items:
        probe_name = py23_compat.text_type(probe_result[0])
        test_results = {
            p[0]: p[1] for p in probe_result[1]
        }
        test_results['last_test_loss'] = napalm_base.helpers.convert(
            int, test_results.pop('last_test_loss'), 0)
        for test_param_name, test_param_value in test_results.items():
            if isinstance(test_param_value, float):
                test_results[test_param_name] = test_param_value * 1e-3
                # convert from useconds to mseconds
        test_name = test_results.pop('test_name', '')
        source = test_results.get('source', u'')
        if source is None:
            test_results['source'] = u''
        if probe_name not in probes_results.keys():
            probes_results[probe_name] = {}
        probes_results[probe_name][test_name] = test_results
    return probes_results
def traceroute(self,
               destination,
               source=C.TRACEROUTE_SOURCE,
               ttl=C.TRACEROUTE_TTL,
               timeout=C.TRACEROUTE_TIMEOUT,
               vrf=C.TRACEROUTE_VRF):
    """Execute traceroute and return results, parsed from the XML RPC reply."""
    traceroute_result = {}
    # calling form RPC does not work properly :(
    # but defined junos_route_instance_table just in case
    source_str = ''
    maxttl_str = ''
    wait_str = ''
    vrf_str = ''
    # Build the optional pieces of the CLI command.
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        wait_str = ' wait {timeout}'.format(timeout=timeout)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
        destination=destination,
        source=source_str,
        maxttl=maxttl_str,
        wait=wait_str,
        vrf=vrf_str
    )
    traceroute_rpc = E('command', traceroute_command)
    rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    traceroute_results = rpc_reply.find('.//traceroute-results')
    traceroute_failure = napalm_base.helpers.find_txt(
        traceroute_results, 'traceroute-failure', '')
    error_message = napalm_base.helpers.find_txt(
        traceroute_results, 'rpc-error/error-message', '')
    if traceroute_failure and error_message:
        return {'error': '{}: {}'.format(traceroute_failure, error_message)}
    traceroute_result['success'] = {}
    for hop in traceroute_results.findall('hop'):
        ttl_value = napalm_base.helpers.convert(
            int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
        if ttl_value not in traceroute_result['success']:
            traceroute_result['success'][ttl_value] = {'probes': {}}
        for probe in hop.findall('probe-result'):
            probe_index = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
            host_name = py23_compat.text_type(
                napalm_base.helpers.find_txt(probe, 'host-name', '*'))
            # rtt is reported in microseconds; convert to milliseconds.
            rtt = napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3  # ms
            traceroute_result['success'][ttl_value]['probes'][probe_index] = {
                'ip_address': ip_address,
                'host_name': host_name,
                'rtt': rtt
            }
    return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
         timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
    """Execute a ping towards *destination* and return the parsed results.

    The CLI command is assembled from the optional source/ttl/timeout/
    size/count/vrf arguments and sent through a direct NETCONF RPC.
    """
    ping_dict = {}
    source_str = ''
    maxttl_str = ''
    timeout_str = ''
    size_str = ''
    count_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        timeout_str = ' wait {timeout}'.format(timeout=timeout)
    if size:
        size_str = ' size {size}'.format(size=size)
    if count:
        count_str = ' count {count}'.format(count=count)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
        destination=destination,
        source=source_str,
        ttl=maxttl_str,
        timeout=timeout_str,
        size=size_str,
        count=count_str,
        vrf=vrf_str
    )
    ping_rpc = E('command', ping_command)
    rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    probe_summary = rpc_reply.find('.//probe-results-summary')
    if probe_summary is None:
        rpc_error = rpc_reply.find('.//rpc-error')
        return {'error': '{}'.format(
            napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
    packet_loss = napalm_base.helpers.convert(
        int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
    # rtt values are valid only if we get an ICMP reply.
    # BUGFIX: use '!=' instead of 'is not' -- identity comparison with an
    # int literal only worked through CPython's small-int caching and is a
    # SyntaxWarning on Python 3.8+.
    if packet_loss != 100:
        ping_dict['success'] = {}
        ping_dict['success']['probes_sent'] = int(
            probe_summary.findtext("probes-sent"))
        ping_dict['success']['packet_loss'] = packet_loss
        # rtt values come back in microseconds; convert to milliseconds.
        ping_dict['success'].update({
            'rtt_min': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
            'rtt_max': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
            'rtt_avg': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-average'), -1) * 1e-3), 3),
            'rtt_stddev': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
        })
        tmp = rpc_reply.find('.//ping-results')
        results_array = []
        for probe_result in tmp.findall('probe-result'):
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip,
                napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
            rtt = round(
                (napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(
                        probe_result, 'rtt'), -1) * 1e-3), 3)
            results_array.append({'ip_address': ip_address,
                                  'rtt': rtt})
        ping_dict['success'].update({'results': results_array})
    else:
        return {'error': 'Packet loss {}'.format(packet_loss)}
    return ping_dict
def get_users(self):
    """Return the configuration of the users, keyed by username."""
    users = {}
    # Translate JunOS login classes into Cisco-style privilege levels.
    _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = {
        'super-user': 15,
        'superuser': 15,
        'operator': 5,
        'read-only': 1,
        'unauthorized': 0
    }
    _DEFAULT_USER_DETAILS = {
        'level': 0,
        'password': '',
        'sshkeys': []
    }
    users_table = junos_views.junos_users_table(self.device)
    users_table.get()
    users_items = users_table.items()
    for user_entry in users_items:
        username = user_entry[0]
        user_details = _DEFAULT_USER_DETAILS.copy()
        # Only keep fields the device actually returned (truthy values).
        user_details.update({
            d[0]: d[1] for d in user_entry[1] if d[1]
        })
        user_class = user_details.pop('class', '')
        user_details = {
            key: py23_compat.text_type(user_details[key])
            for key in user_details.keys()
        }
        level = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0)
        user_details.update({
            'level': level
        })
        # Collapse the per-algorithm key fields into a single list.
        user_details['sshkeys'] = [
            user_details.pop(key)
            for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
            if user_details.get(key, '')
        ]
        users[username] = user_details
    return users
def get_optics(self):
    """Return optics information (per-interface, per-lane optical levels)."""
    optics_table = junos_views.junos_intf_optics_table(self.device)
    optics_table.get()
    optics_items = optics_table.items()
    # optics_items has no lane information, so we need to re-format data
    # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
    # but the information for 40G/100G is incorrect at this point
    # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
    optics_items_with_lane = []
    for intf_optic_item in optics_items:
        temp_list = list(intf_optic_item)
        temp_list.insert(1, u"0")
        new_intf_optic_item = tuple(temp_list)
        optics_items_with_lane.append(new_intf_optic_item)
    # Now optics_items_with_lane has all optics with lane 0 included
    # Example: ('xe-0/0/0', u'0', [ optical_values ])
    # Get optical information for 40G/100G optics
    optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
    optics_table40G.get()
    optics_40Gitems = optics_table40G.items()
    # Re-format data as before inserting lane value
    new_optics_40Gitems = []
    for item in optics_40Gitems:
        lane = item[0]
        iface = item[1].pop(0)
        new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
    # New_optics_40Gitems contains 40G/100G optics only:
    # ('et-0/0/49', u'0', [ optical_values ]),
    # ('et-0/0/49', u'1', [ optical_values ]),
    # ('et-0/0/49', u'2', [ optical_values ])
    # Remove 40G/100G optics entries with wrong information returned
    # from junos_intf_optics_table()
    iface_40G = [item[0] for item in new_optics_40Gitems]
    for intf_optic_item in optics_items_with_lane:
        iface_name = intf_optic_item[0]
        if iface_name not in iface_40G:
            new_optics_40Gitems.append(intf_optic_item)
    # New_optics_40Gitems contains all optics 10G/40G/100G with the lane
    optics_detail = {}
    for intf_optic_item in new_optics_40Gitems:
        lane = intf_optic_item[1]
        interface_name = py23_compat.text_type(intf_optic_item[0])
        optics = dict(intf_optic_item[2])
        if interface_name not in optics_detail:
            optics_detail[interface_name] = {}
            optics_detail[interface_name]['physical_channels'] = {}
            optics_detail[interface_name]['physical_channels']['channel'] = []
        # Defaulting avg, min, max values to 0.0 since device does not
        # return these values
        intf_optics = {
            'index': int(lane),
            'state': {
                'input_power': {
                    'instant': (
                        float(optics['input_power'])
                        if optics['input_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'output_power': {
                    'instant': (
                        float(optics['output_power'])
                        if optics['output_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'laser_bias_current': {
                    'instant': (
                        float(optics['laser_bias_current'])
                        if optics['laser_bias_current'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                }
            }
        }
        optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
    return optics_detail
def get_config(self, retrieve='all'):
    """Return the device configuration in text format.

    JunOS has no separate startup configuration, so 'startup' is
    always an empty string.
    """
    result = {
        'startup': '',
        'running': '',
        'candidate': ''
    }

    def _fetch(database):
        # Retrieve one configuration database as plain text.
        options = {'format': 'text', 'database': database}
        config = self.device.rpc.get_config(filter_xml=None, options=options)
        return py23_compat.text_type(config.text)

    if retrieve in ('candidate', 'all'):
        result['candidate'] = _fetch('candidate')
    if retrieve in ('running', 'all'):
        result['running'] = _fetch('committed')
    return result
def get_network_instances(self, name=''):
    """Return the routing instances; optionally filter by *name*."""
    network_instances = {}
    ri_table = junos_views.junos_nw_instances_table(self.device)
    ri_table.get()
    ri_entries = ri_table.items()
    vrf_interfaces = []
    for ri_entry in ri_entries:
        ri_name = py23_compat.text_type(ri_entry[0])
        ri_details = {
            d[0]: d[1] for d in ri_entry[1]
        }
        ri_type = ri_details['instance_type']
        if ri_type is None:
            ri_type = 'default'
        ri_rd = ri_details['route_distinguisher']
        ri_interfaces = ri_details['interfaces']
        # A single interface comes back as a scalar; normalise to a list.
        if not isinstance(ri_interfaces, list):
            ri_interfaces = [ri_interfaces]
        network_instances[ri_name] = {
            'name': ri_name,
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
            'state': {
                'route_distinguisher': ri_rd if ri_rd else ''
            },
            'interfaces': {
                'interface': {
                    intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                }
            }
        }
        vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
    # Interfaces not bound to any instance belong to the default instance.
    all_interfaces = self.get_interfaces().keys()
    default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
    if 'default' not in network_instances:
        network_instances['default'] = {
            'name': 'default',
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
            'state': {
                'route_distinguisher': ''
            },
            'interfaces': {
                'interface': {
                    py23_compat.text_type(intrf_name): {}
                    for intrf_name in default_interfaces
                }
            }
        }
    if not name:
        return network_instances
    if name not in network_instances:
        return {}
    return {name: network_instances[name]}
|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver.get_arp_table | python | def get_arp_table(self):
# could use ArpTable
# from jnpr.junos.op.phyport import ArpTable
# and simply use it
# but
# we need:
# - filters
# - group by VLAN ID
# - hostname & TTE fields as well
arp_table = []
arp_table_raw = junos_views.junos_arp_table(self.device)
arp_table_raw.get()
arp_table_items = arp_table_raw.items()
for arp_table_entry in arp_table_items:
arp_entry = {
elem[0]: elem[1] for elem in arp_table_entry[1]
}
arp_entry['mac'] = napalm_base.helpers.mac(arp_entry.get('mac'))
arp_entry['ip'] = napalm_base.helpers.ip(arp_entry.get('ip'))
arp_table.append(arp_entry)
return arp_table | Return the ARP table. | train | https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L1198-L1223 | null | class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
    """
    Initialise JunOS driver.

    Optional args:
        * config_lock (True/False): lock configuration DB after the connection is established.
        * port (int): custom port
        * key_file (string): SSH key file path
        * keepalive (int): Keepalive interval
        * ignore_warning (boolean): not generate warning exceptions
    """
    self.hostname = hostname
    self.username = username
    self.password = password
    self.timeout = timeout
    # True while a replace-style candidate is loaded (see load_replace_candidate).
    self.config_replace = False
    # True while this driver holds the configuration DB lock.
    self.locked = False
    # Get optional arguments
    if optional_args is None:
        optional_args = {}
    self.config_lock = optional_args.get('config_lock', False)
    self.port = optional_args.get('port', 22)
    self.key_file = optional_args.get('key_file', None)
    self.keepalive = optional_args.get('keepalive', 30)
    self.ssh_config_file = optional_args.get('ssh_config_file', None)
    self.ignore_warning = optional_args.get('ignore_warning', False)
    # Key-based and password-based authentication need different
    # Device() constructor arguments.
    if self.key_file:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             ssh_private_key_file=self.key_file,
                             ssh_config=self.ssh_config_file,
                             port=self.port)
    else:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             port=self.port,
                             ssh_config=self.ssh_config_file)
    self.profile = ["junos"]
def open(self):
    """Open the connection with the device."""
    try:
        self.device.open()
    except ConnectTimeoutError as cte:
        # NOTE(review): `cte.message` is a py2-era attribute -- confirm
        # ConnectTimeoutError still exposes it under Python 3.
        raise ConnectionException(cte.message)
    self.device.timeout = self.timeout
    self.device._conn._session.transport.set_keepalive(self.keepalive)
    if hasattr(self.device, "cu"):
        # make sure to remove the cu attr from previous session
        # ValueError: requested attribute name cu already exists
        del self.device.cu
    self.device.bind(cu=Config)
    if self.config_lock:
        self._lock()
def close(self):
    """Close the connection."""
    # Release a session-long lock (config_lock=True) before closing.
    if self.config_lock:
        self._unlock()
    self.device.close()
def _lock(self):
    """Lock the config DB (idempotent: no-op if already locked)."""
    if not self.locked:
        self.device.cu.lock()
        self.locked = True
def _unlock(self):
    """Unlock the config DB (idempotent: no-op if not locked)."""
    if self.locked:
        self.device.cu.unlock()
        self.locked = False
def _rpc(self, get, child=None, **kwargs):
    """
    This allows you to construct an arbitrary RPC call to retrieve common stuff. For example:
    Configuration: get: "<get-configuration/>"
    Interface information: get: "<get-interface-information/>"
    A particular interfacece information:
        get: "<get-interface-information/>"
        child: "<interface-name>ge-0/0/0</interface-name>"
    """
    # NOTE(review): **kwargs is accepted but never used in the body.
    rpc = etree.fromstring(get)
    if child:
        rpc.append(etree.fromstring(child))
    response = self.device.execute(rpc)
    # Return the reply serialised as an XML string.
    return etree.tostring(response)
def is_alive(self):
    """Report whether both the SSH transport and the NETCONF session are up."""
    transport_up = self.device._conn._session.transport.is_active()
    return {'is_alive': transport_up and self.device.connected}
@staticmethod
def _is_json_format(config):
    """Return True when *config* parses as JSON, False otherwise."""
    try:
        json.loads(config)
    except (TypeError, ValueError):
        return False
    return True
def _detect_config_format(self, config):
    """Guess the configuration payload format: 'xml', 'set', 'json' or 'text'."""
    # First word of a set-style configuration line.
    set_actions = frozenset([
        'set',
        'activate',
        'deactivate',
        'annotate',
        'copy',
        'delete',
        'insert',
        'protect',
        'rename',
        'unprotect',
    ])
    stripped = config.strip()
    if stripped.startswith('<'):
        return 'xml'
    if stripped.split(' ')[0] in set_actions:
        return 'set'
    if self._is_json_format(config):
        return 'json'
    return 'text'
def _load_candidate(self, filename, config, overwrite):
    """Load *filename* or *config* into the candidate DB.

    *overwrite* selects replace (True) vs merge (False) semantics.
    """
    if filename is None:
        configuration = config
    else:
        with open(filename) as f:
            configuration = f.read()
    if not self.config_lock:
        # if not locked during connection time
        # will try to lock it if not already acquired
        self._lock()
        # and the device will be locked till first commit/rollback
    try:
        fmt = self._detect_config_format(configuration)
        if fmt == "xml":
            configuration = etree.XML(configuration)
        self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                            ignore_warning=self.ignore_warning)
    except ConfigLoadError as e:
        # Re-raise as the NAPALM exception matching the pending operation.
        if self.config_replace:
            raise ReplaceConfigException(e.errs)
        else:
            raise MergeConfigException(e.errs)
def load_replace_candidate(self, filename=None, config=None):
    """Load candidate config, replacing the current configuration entirely."""
    self.config_replace = True
    self._load_candidate(filename, config, True)
def load_merge_candidate(self, filename=None, config=None):
    """Load candidate config, merging it into the current configuration."""
    self.config_replace = False
    self._load_candidate(filename, config, False)
def compare_config(self):
    """Return the diff between candidate and running config ('' when none)."""
    diff = self.device.cu.diff()
    return '' if diff is None else diff.strip()
def commit_config(self):
    """Commit configuration."""
    self.device.cu.commit(ignore_warning=self.ignore_warning)
    # Release the per-operation lock; a session-long lock
    # (config_lock=True) is kept until close().
    if not self.config_lock:
        self._unlock()
def discard_config(self):
    """Discard changes (rollback 0)."""
    self.device.cu.rollback(rb_id=0)
    # Release the per-operation lock; a session-long lock
    # (config_lock=True) is kept until close().
    if not self.config_lock:
        self._unlock()
def rollback(self):
    """Rollback to previous commit (rollback 1) and commit the result."""
    self.device.cu.rollback(rb_id=1)
    self.commit_config()
def get_facts(self):
    """Return facts of the device."""
    output = self.device.facts
    # Uptime may be falsy right after connect; normalise to -1.
    uptime = self.device.uptime or -1
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    interface_list = interfaces.keys()
    return {
        'vendor': u'Juniper',
        'model': py23_compat.text_type(output['model']),
        'serial_number': py23_compat.text_type(output['serialnumber']),
        'os_version': py23_compat.text_type(output['version']),
        'hostname': py23_compat.text_type(output['hostname']),
        'fqdn': py23_compat.text_type(output['fqdn']),
        'uptime': uptime,
        'interface_list': interface_list
    }
def get_interfaces(self):
    """Return interfaces details, keyed by interface name."""
    result = {}
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    # convert all the tuples to our pre-defined dict structure
    for iface in interfaces.keys():
        result[iface] = {
            'is_up': interfaces[iface]['is_up'],
            'is_enabled': interfaces[iface]['is_enabled'],
            'description': (interfaces[iface]['description'] or u''),
            'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
            'mac_address': napalm_base.helpers.convert(
                napalm_base.helpers.mac,
                interfaces[iface]['mac_address'],
                py23_compat.text_type(interfaces[iface]['mac_address'])),
            'speed': -1
        }
        # result[iface]['last_flapped'] = float(result[iface]['last_flapped'])
        # Parse speed strings like '1000mbps' / '10Gbps'; keep -1 when absent.
        match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
        if match is None:
            continue
        speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
        if speed_value == -1:
            continue
        speed_unit = match.group(2)
        if speed_unit.lower() == 'gbps':
            # Normalise to Mbps.
            speed_value *= 1000
        result[iface]['speed'] = speed_value
    return result
def get_interfaces_counters(self):
    """Return per-interface counters; counters the device omits become -1."""
    counters_table = junos_views.junos_iface_counter_table(self.device)
    counters_table.get()
    result = {}
    for iface, counters in counters_table.items():
        result[iface] = {
            name: (value if value is not None else -1)
            for name, value in counters
        }
    return result
def get_environment(self):
    """Return environment details (power, fans, temperature, cpu, memory)."""
    environment = junos_views.junos_enviroment_table(self.device)
    routing_engine = junos_views.junos_routing_engine_table(self.device)
    temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
    power_supplies = junos_views.junos_pem_table(self.device)
    environment.get()
    routing_engine.get()
    temperature_thresholds.get()
    environment_data = {}
    current_class = None
    for sensor_object, object_data in environment.items():
        structured_object_data = {k: v for k, v in object_data}
        if structured_object_data['class']:
            # If current object has a 'class' defined, store it for use
            # on subsequent unlabeled lines.
            current_class = structured_object_data['class']
        else:
            # Juniper doesn't label the 2nd+ lines of a given class with a
            # class name. In that case, we use the most recent class seen.
            structured_object_data['class'] = current_class
        if structured_object_data['class'] == 'Power':
            # Create a dict for the 'power' key
            try:
                environment_data['power'][sensor_object] = {}
            except KeyError:
                environment_data['power'] = {}
                environment_data['power'][sensor_object] = {}
            # Defaults; corrected below from the PEM table when available.
            environment_data['power'][sensor_object]['capacity'] = -1.0
            environment_data['power'][sensor_object]['output'] = -1.0
        if structured_object_data['class'] == 'Fans':
            # Create a dict for the 'fans' key
            try:
                environment_data['fans'][sensor_object] = {}
            except KeyError:
                environment_data['fans'] = {}
                environment_data['fans'][sensor_object] = {}
        status = structured_object_data['status']
        env_class = structured_object_data['class']
        if (status == 'OK' and env_class == 'Power'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['power'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Power'):
            environment_data['power'][sensor_object]['status'] = False
        elif (status == 'OK' and env_class == 'Fans'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['fans'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Fans'):
            environment_data['fans'][sensor_object]['status'] = False
        # NOTE(review): this inner loop iterates ALL threshold entries for
        # every sensor and re-applies the same sensor temperature against
        # each -- looks redundant; confirm intended behaviour before
        # restructuring.
        for temperature_object, temperature_data in temperature_thresholds.items():
            structured_temperature_data = {k: v for k, v in temperature_data}
            if structured_object_data['class'] == 'Temp':
                # Create a dict for the 'temperature' key
                try:
                    environment_data['temperature'][sensor_object] = {}
                except KeyError:
                    environment_data['temperature'] = {}
                    environment_data['temperature'][sensor_object] = {}
                # Check we have a temperature field in this class (See #66)
                if structured_object_data['temperature']:
                    environment_data['temperature'][sensor_object]['temperature'] = \
                        float(structured_object_data['temperature'])
                    # Set a default value (False) to the key is_critical and is_alert
                    environment_data['temperature'][sensor_object]['is_alert'] = False
                    environment_data['temperature'][sensor_object]['is_critical'] = False
                    # Check if the working temperature is equal to or higher than alerting threshold
                    temp = structured_object_data['temperature']
                    if structured_temperature_data['red-alarm'] <= temp:
                        environment_data['temperature'][sensor_object]['is_critical'] = True
                        environment_data['temperature'][sensor_object]['is_alert'] = True
                    elif structured_temperature_data['yellow-alarm'] <= temp:
                        environment_data['temperature'][sensor_object]['is_alert'] = True
    # Try to correct Power Supply information
    pem_table = dict()
    try:
        power_supplies.get()
    except RpcError:
        # Not all platforms have support for this
        pass
    else:
        # Format PEM information and correct capacity and output values
        for pem in power_supplies.items():
            pem_name = pem[0].replace("PEM", "Power Supply")
            pem_table[pem_name] = dict(pem[1])
            environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
            environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
    for routing_engine_object, routing_engine_data in routing_engine.items():
        structured_routing_engine_data = {k: v for k, v in routing_engine_data}
        # Create dicts for 'cpu' and 'memory'.
        try:
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        except KeyError:
            environment_data['cpu'] = {}
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        # Calculate the CPU usage by using the CPU idle value.
        environment_data['cpu'][routing_engine_object]['%usage'] = \
            100.0 - structured_routing_engine_data['cpu-idle']
        try:
            environment_data['memory']['available_ram'] = \
                int(structured_routing_engine_data['memory-dram-size'])
        except ValueError:
            # Some platforms report e.g. '2048 MB'; keep only the digits.
            environment_data['memory']['available_ram'] = \
                int(
                    ''.join(
                        i for i in structured_routing_engine_data['memory-dram-size']
                        if i.isdigit()
                    )
                )
        # Junos gives us RAM in %, so calculation has to be made.
        # Sadly, because of this, results are not 100% accurate to the truth.
        environment_data['memory']['used_ram'] = \
            int(round(environment_data['memory']['available_ram'] / 100.0 *
                      structured_routing_engine_data['memory-buffer-utilization']))
    return environment_data
@staticmethod
def _get_address_family(table):
"""
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
"""
address_family_mapping = {
'inet': 'ipv4',
'inet6': 'ipv6',
'inetflow': 'flow'
}
family = table.split('.')[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = family
return address_family
def _parse_route_stats(self, neighbor):
data = {
'ipv4': {
'received_prefixes': -1,
'accepted_prefixes': -1,
'sent_prefixes': -1
},
'ipv6': {
'received_prefixes': -1,
'accepted_prefixes': -1,
'sent_prefixes': -1
}
}
if not neighbor['is_up']:
return data
elif isinstance(neighbor['tables'], list):
if isinstance(neighbor['sent_prefixes'], int):
# We expect sent_prefixes to be a list, but sometimes it
# is of type int. Therefore convert attribute to list
neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
for idx, table in enumerate(neighbor['tables']):
family = self._get_address_family(table)
data[family] = {}
data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
if 'in sync' in neighbor['send-state'][idx]:
data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
else:
data[family]['sent_prefixes'] = 0
else:
family = self._get_address_family(neighbor['tables'])
data[family] = {}
data[family]['received_prefixes'] = neighbor['received_prefixes']
data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
data[family]['sent_prefixes'] = neighbor['sent_prefixes']
return data
@staticmethod
def _parse_value(value):
    """Normalise a table value: text becomes unicode, None becomes ''."""
    if value is None:
        return u''
    # None is never a string, so checking it first preserves behaviour.
    if isinstance(value, py23_compat.string_types):
        return py23_compat.text_type(value)
    return value
def get_bgp_neighbors(self):
    """Return BGP neighbors details.

    The result is keyed by routing instance name ('global' stands for
    the master instance); each value carries the instance 'router_id'
    and a 'peers' dict keyed by peer IP address.  Instances that end up
    without any BGP peer are filtered out of the final result.
    """
    bgp_neighbor_data = {}
    # Per-peer template; deep-copied for every neighbor so the nested
    # 'address_family' dict is never shared between peers.
    default_neighbor_details = {
        'local_as': 0,
        'remote_as': 0,
        'remote_id': '',
        'is_up': False,
        'is_enabled': False,
        'description': '',
        'uptime': 0,
        'address_family': {}
    }
    keys = default_neighbor_details.keys()
    uptime_table = junos_views.junos_bgp_uptime_table(self.device)
    bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
    # Memoises the uptime table per routing instance so the RPC is
    # issued at most once for each instance.
    uptime_table_lookup = {}

    def _get_uptime_table(instance):
        # Fetch (and cache) the BGP uptime table for one instance.
        if instance not in uptime_table_lookup:
            uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
        return uptime_table_lookup[instance]

    def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
        '''
        Make sure to execute a simple request whenever using
        junos > 13. This is a helper used to avoid code redundancy
        and reuse the function also when iterating through the list
        BGP neighbors under a specific routing instance,
        also when the device is capable to return the routing
        instance name at the BGP neighbor level.
        '''
        for bgp_neighbor in neighbor_data:
            # The table key may carry a '+port' suffix; keep the address only.
            peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # not instance, means newer Junos version,
                # as we request everything in a single request
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                # instance is explicitly requests,
                # thus it's an old Junos, so we retrieve the BGP neighbors
                # under a certain routing instance
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            if instance_name not in bgp_neighbor_data:
                bgp_neighbor_data[instance_name] = {}
            if 'router_id' not in bgp_neighbor_data[instance_name]:
                # we only need to set this once
                bgp_neighbor_data[instance_name]['router_id'] = \
                    py23_compat.text_type(neighbor_details.get('local_id', ''))
            # Keep only the fields defined in the per-peer template.
            peer = {
                key: self._parse_value(value)
                for key, value in neighbor_details.items()
                if key in keys
            }
            peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
            peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
            peer['address_family'] = self._parse_route_stats(neighbor_details)
            if 'peers' not in bgp_neighbor_data[instance_name]:
                bgp_neighbor_data[instance_name]['peers'] = {}
            bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
            if not uptime_table_items:
                uptime_table_items = _get_uptime_table(instance)
            # Merge the uptime values into the peers collected so far.
            for neighbor, uptime in uptime_table_items:
                if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                    bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]

    # Commenting out the following sections, till Junos
    # will provide a way to identify the routing instance name
    # from the details of the BGP neighbor
    # currently, there are Junos 15 version having a field called `peer_fwd_rti`
    # but unfortunately, this is not consistent.
    # Junos 17 might have this fixed, but this needs to be revisited later.
    # In the definition below, `old_junos` means a version that does not provide
    # the forwarding RTI information.
    #
    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15

    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device).get()
    for instance, instance_data in instances.items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        bgp_neighbor_data[instance] = {'peers': {}}
        instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
        uptime_table_items = uptime_table.get(instance=instance).items()
        _get_bgp_neighbors_core(instance_neighbors,
                                instance=instance,
                                uptime_table_items=uptime_table_items)
    # If the OS provides the `peer_fwd_rti` or any way to identify the
    # rotuing instance name (see above), the performances of this getter
    # can be significantly improved, as we won't execute one request
    # for each an every RT.
    # However, this improvement would only be beneficial for multi-VRF envs.
    #
    # else:
    #     instance_neighbors = bgp_neighbors_table.get().items()
    #     _get_bgp_neighbors_core(instance_neighbors)
    # Drop instances that ended up with no peers at all.
    bgp_tmp_dict = {}
    for k, v in bgp_neighbor_data.items():
        if bgp_neighbor_data[k]['peers']:
            bgp_tmp_dict[k] = v
    return bgp_tmp_dict
def get_lldp_neighbors(self):
    """Return the LLDP neighbors, grouped by local interface name."""
    lldp_table = junos_views.junos_lldp_table(self.device)
    try:
        lldp_table.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    neighbors = {}
    for local_port, fields in lldp_table.items():
        entry = {name: py23_compat.text_type(value) for name, value in fields}
        neighbors.setdefault(local_port, []).append(entry)
    return neighbors
def get_lldp_neighbors_detail(self, interface=''):
    """Detailed view of the LLDP neighbors.

    :param interface: accepted for API compatibility but currently not
        used to filter the output — details are returned for every
        interface that has LLDP neighbors.
    :returns: dict keyed by local interface name, each value a list of
        neighbor detail dicts.
    """
    lldp_neighbors = {}
    lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
    try:
        lldp_table.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    # Reuse the table populated above instead of issuing the very same
    # RPC a second time only to read its keys.
    interfaces = lldp_table.keys()
    # get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
    # and SRX Series is get-lldp-interface-neighbors-information,
    # and rpc for M, MX, and T Series is get-lldp-interface-neighbors
    # ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
    # ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
    # Decide the RPC and its argument name once, instead of re-checking
    # the platform personality on every loop iteration.
    if self.device.facts.get('personality') in ('MX', 'M', 'T'):
        lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
        interface_arg = 'interface_device'
    else:
        lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
        interface_arg = 'interface_name'
    for iface in interfaces:
        lldp_table.get(**{interface_arg: iface})
        for item in lldp_table:
            if iface not in lldp_neighbors.keys():
                lldp_neighbors[iface] = []
            lldp_neighbors[iface].append({
                'parent_interface': item.parent_interface,
                'remote_port': item.remote_port,
                'remote_chassis_id': napalm_base.helpers.convert(
                    napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
                'remote_port_description': napalm_base.helpers.convert(
                    py23_compat.text_type, item.remote_port_description),
                'remote_system_name': item.remote_system_name,
                'remote_system_description': item.remote_system_description,
                'remote_system_capab': item.remote_system_capab,
                'remote_system_enable_capab': item.remote_system_enable_capab
            })
    return lldp_neighbors
def cli(self, commands):
    """Execute raw CLI commands and returns their output.

    :param commands: list of CLI command strings; Junos-style pipe
        modifiers (``| match``, ``| count`` ...) are supported.
    :returns: dict mapping each *original* command string to its output.
    :raises TypeError: when ``commands`` is not a list.
    """
    cli_output = {}

    def _count(txt, none):  # Second arg for consistency only. noqa
        '''
        Return the exact output, as Junos displays
        e.g.:
        > show system processes extensive | match root | count
        Count: 113 lines
        '''
        count = len(txt.splitlines())
        return 'Count: {count} lines'.format(count=count)

    def _trim(txt, length):
        '''
        Trim specified number of columns from start of line.
        '''
        try:
            newlines = []
            for line in txt.splitlines():
                newlines.append(line[int(length):])
            return '\n'.join(newlines)
        except ValueError:
            # non-numeric length: return the text untouched
            return txt

    def _except(txt, pattern):
        '''
        Show only text that does not match a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        unmatched = [
            line for line in txt.splitlines()
            if not re.search(rgx, line, re.I)
        ]
        return '\n'.join(unmatched)

    def _last(txt, length):
        '''
        Display end of output only.
        '''
        try:
            return '\n'.join(
                txt.splitlines()[(-1)*int(length):]
            )
        except ValueError:
            return txt

    def _match(txt, pattern):
        '''
        Show only text that matches a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        matched = [
            line for line in txt.splitlines()
            if re.search(rgx, line, re.I)
        ]
        return '\n'.join(matched)

    def _find(txt, pattern):
        '''
        Search for first occurrence of pattern.
        '''
        rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
        match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
        if match:
            return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
        else:
            return '\nPattern not found'

    def _process_pipe(cmd, txt):
        '''
        Process CLI output from Juniper device that
        doesn't allow piping the output.
        '''
        # NOTE(review): when `txt` is not None the device is assumed to
        # have applied the pipe modifiers itself; the Python-side
        # emulation below only runs for a None response — confirm this
        # matches `Device.cli` behaviour on the target platforms.
        if txt is not None:
            return txt
        _OF_MAP = OrderedDict()
        _OF_MAP['except'] = _except
        _OF_MAP['match'] = _match
        _OF_MAP['last'] = _last
        _OF_MAP['trim'] = _trim
        _OF_MAP['count'] = _count
        _OF_MAP['find'] = _find
        # the operations order matter in this case!
        exploded_cmd = cmd.split('|')
        pipe_oper_args = {}
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            pipe_args = ''.join(exploded_pipe[1:2])
            # will not throw error when there's no arg
            pipe_oper_args[pipe_oper] = pipe_args
        for oper in _OF_MAP.keys():
            # to make sure the operation sequence is correct
            if oper not in pipe_oper_args.keys():
                continue
            txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
        return txt

    if not isinstance(commands, list):
        raise TypeError('Please enter a valid list of commands!')
    # Pipe modifiers that must never be sent to the device, e.g. `save`
    # would write files on the router's filesystem.
    _PIPE_BLACKLIST = ['save']
    # Preprocessing to avoid forbidden commands
    for command in commands:
        exploded_cmd = command.split('|')
        command_safe_parts = []
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            if pipe_oper in _PIPE_BLACKLIST:
                continue
            pipe_args = ''.join(exploded_pipe[1:2])
            safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                              args=pipe_args)
            command_safe_parts.append(safe_pipe)
        safe_command = exploded_cmd[0] if not command_safe_parts else\
            '{base} | {pipes}'.format(base=exploded_cmd[0],
                                      pipes=' | '.join(command_safe_parts))
        raw_txt = self.device.cli(safe_command, warning=False)
        # The output is keyed by the original command, not the sanitised
        # one that was actually sent to the device.
        cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
            _process_pipe(command, raw_txt))
    return cli_output
def get_bgp_config(self, group='', neighbor=''):
    """Return BGP configuration.

    :param group: optional BGP group name; when set, only that group
        is retrieved.
    :param neighbor: optional peer address; only honoured together with
        ``group`` (it is reset otherwise).
    :returns: dict keyed by group name; each group carries its options,
        a 'prefix_limit' tree and a 'neighbors' dict keyed by peer IP.
    """
    def update_dict(d, u):  # for deep dictionary update
        # NOTE(review): collections.Mapping was removed in Python 3.10;
        # collections.abc.Mapping is the forward-compatible name.
        for k, v in u.items():
            if isinstance(d, collections.Mapping):
                if isinstance(v, collections.Mapping):
                    r = update_dict(d.get(k, {}), v)
                    d[k] = r
                else:
                    d[k] = u[k]
            else:
                d = {k: u[k]}
        return d

    def build_prefix_limit(**args):
        """
        Transform the lements of a dictionary into nested dictionaries.

        Example:
            {
                'inet_unicast_limit': 500,
                'inet_unicast_teardown_threshold': 95,
                'inet_unicast_teardown_timeout': 5
            }

        becomes:
            {
                'inet': {
                    'unicast': {
                        'limit': 500,
                        'teardown': {
                            'threshold': 95,
                            'timeout': 5
                        }
                    }
                }
            }
        """
        prefix_limit = {}
        for key, value in args.items():
            key_levels = key.split('_')
            length = len(key_levels)-1
            temp_dict = {
                key_levels[length]: value
            }
            # Wrap the value one level deeper for every remaining token,
            # right-to-left.
            for index in reversed(range(length)):
                level = key_levels[index]
                temp_dict = {level: temp_dict}
            update_dict(prefix_limit, temp_dict)
        return prefix_limit

    # Fields shared by group-level and peer-level configuration,
    # together with the datatype used for normalisation.
    _COMMON_FIELDS_DATATYPE_ = {
        'description': py23_compat.text_type,
        'local_address': py23_compat.text_type,
        'local_as': int,
        'remote_as': int,
        'import_policy': py23_compat.text_type,
        'export_policy': py23_compat.text_type,
        'inet_unicast_limit_prefix_limit': int,
        'inet_unicast_teardown_threshold_prefix_limit': int,
        'inet_unicast_teardown_timeout_prefix_limit': int,
        'inet_unicast_novalidate_prefix_limit': int,
        'inet_flow_limit_prefix_limit': int,
        'inet_flow_teardown_threshold_prefix_limit': int,
        'inet_flow_teardown_timeout_prefix_limit': int,
        'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
        'inet6_unicast_limit_prefix_limit': int,
        'inet6_unicast_teardown_threshold_prefix_limit': int,
        'inet6_unicast_teardown_timeout_prefix_limit': int,
        'inet6_unicast_novalidate_prefix_limit': int,
        'inet6_flow_limit_prefix_limit': int,
        'inet6_flow_teardown_threshold_prefix_limit': int,
        'inet6_flow_teardown_timeout_prefix_limit': int,
        'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
    }

    _PEER_FIELDS_DATATYPE_MAP_ = {
        'authentication_key': py23_compat.text_type,
        'route_reflector_client': bool,
        'nhs': bool
    }
    _PEER_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )

    _GROUP_FIELDS_DATATYPE_MAP_ = {
        'type': py23_compat.text_type,
        'apply_groups': list,
        'remove_private_as': bool,
        'multipath': bool,
        'multihop_ttl': int
    }
    _GROUP_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )

    # Default value emitted for each datatype when the device returns
    # nothing for a field.
    _DATATYPE_DEFAULT_ = {
        py23_compat.text_type: '',
        int: 0,
        bool: False,
        list: []
    }

    bgp_config = {}
    if group:
        bgp = junos_views.junos_bgp_config_group_table(self.device)
        bgp.get(group=group)
    else:
        bgp = junos_views.junos_bgp_config_table(self.device)
        bgp.get()
        neighbor = ''  # if no group is set, no neighbor should be set either
    bgp_items = bgp.items()
    if neighbor:
        neighbor_ip = napalm_base.helpers.ip(neighbor)
    for bgp_group in bgp_items:
        bgp_group_name = bgp_group[0]
        bgp_group_details = bgp_group[1]
        # Start from the defaults for every non-prefix-limit field.
        bgp_config[bgp_group_name] = {
            field: _DATATYPE_DEFAULT_.get(datatype)
            for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
            if '_prefix_limit' not in field
        }
        for elem in bgp_group_details:
            # First pass: non-prefix-limit fields with real values.
            if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                continue
            datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
            default = _DATATYPE_DEFAULT_.get(datatype)
            key = elem[0]
            value = elem[1]
            if key in ['export_policy', 'import_policy']:
                if isinstance(value, list):
                    value = ' '.join(value)
            if key == 'local_address':
                value = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, value, value)
            if key == 'neighbors':
                # keep the nested peers table for the per-peer loop below
                bgp_group_peers = value
                continue
            bgp_config[bgp_group_name].update({
                key: napalm_base.helpers.convert(datatype, value, default)
            })
        # Second pass: collect the prefix-limit fields and nest them.
        prefix_limit_fields = {}
        for elem in bgp_group_details:
            if '_prefix_limit' in elem[0] and elem[1] is not None:
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                prefix_limit_fields.update({
                    elem[0].replace('_prefix_limit', ''):
                        napalm_base.helpers.convert(datatype, elem[1], default)
                })
        bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
        if 'multihop' in bgp_config[bgp_group_name].keys():
            # Delete 'multihop' key from the output
            del bgp_config[bgp_group_name]['multihop']
            if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                # Set ttl to default value 64
                bgp_config[bgp_group_name]['multihop_ttl'] = 64

        bgp_config[bgp_group_name]['neighbors'] = {}
        for bgp_group_neighbor in bgp_group_peers.items():
            bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
            if neighbor and bgp_peer_address != neighbor:
                continue  # if filters applied, jump over all other neighbors
            bgp_group_details = bgp_group_neighbor[1]
            # Start from the defaults for every non-prefix-limit field.
            bgp_peer_details = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                bgp_peer_details.update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
                bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['local_as'])
                bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['remote_as'])
                if key == 'cluster':
                    bgp_peer_details['route_reflector_client'] = True
                    # we do not want cluster in the output
                    del bgp_peer_details['cluster']
            # A cluster configured at group level also makes the peer a
            # route-reflector client.
            if 'cluster' in bgp_config[bgp_group_name].keys():
                bgp_peer_details['route_reflector_client'] = True
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
            if neighbor and bgp_peer_address == neighbor_ip:
                break  # found the desired neighbor
        if 'cluster' in bgp_config[bgp_group_name].keys():
            # we do not want cluster in the output
            del bgp_config[bgp_group_name]['cluster']
    return bgp_config
def get_bgp_neighbors_detail(self, neighbor_address=''):
    """Detailed view of the BGP neighbors operational data.

    The result is keyed by routing instance name ('global' for the
    master instance), then by remote AS number; each value is a list of
    neighbor detail dicts.

    :param neighbor_address: optional peer address used to filter the
        request sent to the device.
    """
    bgp_neighbors = {}
    # Per-neighbor template; deep-copied for every neighbor before it
    # is updated with the values returned by the device.
    default_neighbor_details = {
        'up': False,
        'local_as': 0,
        'remote_as': 0,
        'router_id': u'',
        'local_address': u'',
        'routing_table': u'',
        'local_address_configured': False,
        'local_port': 0,
        'remote_address': u'',
        'remote_port': 0,
        'multihop': False,
        'multipath': False,
        'remove_private_as': False,
        'import_policy': u'',
        'export_policy': u'',
        'input_messages': -1,
        'output_messages': -1,
        'input_updates': -1,
        'output_updates': -1,
        'messages_queued_out': -1,
        'connection_state': u'',
        'previous_connection_state': u'',
        'last_event': u'',
        'suppress_4byte_as': False,
        'local_as_prepend': False,
        'holdtime': 0,
        'configured_holdtime': 0,
        'keepalive': 0,
        'configured_keepalive': 0,
        'active_prefix_count': -1,
        'received_prefix_count': -1,
        'accepted_prefix_count': -1,
        'suppressed_prefix_count': -1,
        'advertised_prefix_count': -1,
        'flap_count': 0
    }
    # Maps Junos BGP option flags to the boolean keys of the output.
    OPTION_KEY_MAP = {
        'RemovePrivateAS': 'remove_private_as',
        'Multipath': 'multipath',
        'Multihop': 'multihop',
        'AddressFamily': 'local_address_configured'
        # 'AuthKey' : 'authentication_key_set'
        # but other vendors do not specify if auth key is set
        # other options:
        # Preference, HoldTime, Ttl, LogUpDown, Refresh
    }

    def _bgp_iter_core(neighbor_data, instance=None):
        '''
        Iterate over a list of neighbors.
        For older junos, the routing instance is not specified inside the
        BGP neighbors XML, therefore we need to use a super sub-optimal structure
        as in get_bgp_neighbors: iterate through the list of network instances
        then execute one request for each and every routing instance.
        For newer junos, this is not necessary as the routing instance is available
        and we can get everything solve in a single request.
        '''
        for bgp_neighbor in neighbor_data:
            remote_as = int(bgp_neighbor[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            # Translate the space-separated option flags into booleans.
            options = neighbor_details.pop('options', '')
            if isinstance(options, str):
                options_list = options.split()
                for option in options_list:
                    key = OPTION_KEY_MAP.get(option)
                    if key is not None:
                        neighbor_details[key] = True
            four_byte_as = neighbor_details.pop('4byte_as', 0)
            # Addresses come as 'ip+port'; the port defaults to 179
            # when absent.
            local_address = neighbor_details.pop('local_address', '')
            local_details = local_address.split('+')
            neighbor_details['local_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, local_details[0], local_details[0])
            if len(local_details) == 2:
                neighbor_details['local_port'] = int(local_details[1])
            else:
                neighbor_details['local_port'] = 179
            neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
            peer_address = neighbor_details.pop('peer_address', '')
            remote_details = peer_address.split('+')
            neighbor_details['remote_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, remote_details[0], remote_details[0])
            if len(remote_details) == 2:
                neighbor_details['remote_port'] = int(remote_details[1])
            else:
                neighbor_details['remote_port'] = 179
            neighbor_details['routing_table'] = instance_name
            neighbor_details['local_as'] = napalm_base.helpers.as_number(
                neighbor_details['local_as'])
            neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                neighbor_details['remote_as'])
            neighbors_rib = neighbor_details.pop('rib')
            neighbors_queue = neighbor_details.pop('queue')
            # Total the queued messages across all queues.
            messages_queued_out = 0
            for queue_entry in neighbors_queue.items():
                messages_queued_out += queue_entry[1][0][1]
            neighbor_details['messages_queued_out'] = messages_queued_out
            if instance_name not in bgp_neighbors.keys():
                bgp_neighbors[instance_name] = {}
            if remote_as not in bgp_neighbors[instance_name].keys():
                bgp_neighbors[instance_name][remote_as] = []
            neighbor_rib_stats = neighbors_rib.items()
            if not neighbor_rib_stats:
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                continue  # no RIBs available, pass default details
            # Aggregate the prefix counters over all RIBs of the peer.
            neighbor_rib_details = {
                'active_prefix_count': 0,
                'received_prefix_count': 0,
                'accepted_prefix_count': 0,
                'suppressed_prefix_count': 0,
                'advertised_prefix_count': 0
            }
            for rib_entry in neighbor_rib_stats:
                for elem in rib_entry[1]:
                    if elem[1] is None:
                        neighbor_rib_details[elem[0]] += 0
                    else:
                        neighbor_rib_details[elem[0]] += elem[1]
            neighbor_details.update(neighbor_rib_details)
            bgp_neighbors[instance_name][remote_as].append(neighbor_details)

    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device)
    for instance, instance_data in instances.get().items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                neighbor_address=str(neighbor_address)).items()
        _bgp_iter_core(neighbor_data, instance=instance)
    # else:
    #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
    #     _bgp_iter_core(neighbor_data)
    return bgp_neighbors
def get_ntp_peers(self):
    """Return the NTP peers configured on the device, keyed by address."""
    peers_table = junos_views.junos_ntp_peers_config_table(self.device)
    peers_table.get()
    configured = peers_table.items()
    if not configured:
        return {}
    return {napalm_base.helpers.ip(entry[0]): {} for entry in configured}
def get_ntp_servers(self):
    """Return the NTP servers configured on the device, keyed by address."""
    servers_table = junos_views.junos_ntp_servers_config_table(self.device)
    servers_table.get()
    configured = servers_table.items()
    if not configured:
        return {}
    return {napalm_base.helpers.ip(entry[0]): {} for entry in configured}
def get_ntp_stats(self):
    """Return NTP stats (associations).

    There is no structured RPC for ``show ntp associations``, so the
    raw CLI output is parsed line by line.

    :returns: list of dicts, one per NTP association.
    """
    # NTP Peers does not have XML RPC defined
    # thus we need to retrieve raw text and parse...
    # :(
    ntp_stats = []
    # Raw strings: the pattern is full of backslash escapes (\s, \.)
    # which are invalid string escape sequences in ordinary literals
    # (DeprecationWarning today, a SyntaxError in future Pythons).
    REGEX = (
        r'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
        r'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
        r'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
        r'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
        r'\s+([0-9\.]+)\s?$'
    )
    ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
    ntp_assoc_output_lines = ntp_assoc_output.splitlines()
    for ntp_assoc_output_line in ntp_assoc_output_lines[3:]:  # skip the header lines
        line_search = re.search(REGEX, ntp_assoc_output_line, re.I)
        if not line_search:
            continue  # pattern not found
        line_groups = line_search.groups()
        try:
            ntp_stats.append({
                'remote': napalm_base.helpers.ip(line_groups[1]),
                'synchronized': (line_groups[0] == '*'),
                'referenceid': py23_compat.text_type(line_groups[2]),
                'stratum': int(line_groups[3]),
                'type': py23_compat.text_type(line_groups[4]),
                'when': py23_compat.text_type(line_groups[5]),
                'hostpoll': int(line_groups[6]),
                'reachability': int(line_groups[7]),
                'delay': float(line_groups[8]),
                'offset': float(line_groups[9]),
                'jitter': float(line_groups[10])
            })
        except Exception:
            # best-effort: skip lines whose fields do not parse
            continue  # jump to next line
    return ntp_stats
def get_interfaces_ip(self):
    """Return the configured IP addresses, grouped by interface and family."""
    interfaces_ip = {}
    ip_table = junos_views.junos_ip_interfaces_table(self.device)
    ip_table.get()
    _FAMILY_VMAP_ = {
        'inet': u'ipv4',
        'inet6': u'ipv6'
        # can add more mappings
    }
    # Fallback prefix length when the table key carries none.
    _FAMILY_MAX_PREFIXLEN = {
        'inet': 32,
        'inet6': 128
    }
    for entry_key, entry_fields in ip_table.items():
        # Keys look like 'address/prefix'.
        raw_address = entry_key.split('/')[0]
        address = napalm_base.helpers.convert(
            napalm_base.helpers.ip, raw_address, raw_address)
        try:
            fields = dict(entry_fields)
            family_raw = fields.get('family')
            interface = py23_compat.text_type(fields.get('interface'))
        except ValueError:
            continue
        prefix = napalm_base.helpers.convert(int,
                                             entry_key.split('/')[-1],
                                             _FAMILY_MAX_PREFIXLEN.get(family_raw))
        family = _FAMILY_VMAP_.get(family_raw)
        if not family or not interface:
            continue
        family_dict = interfaces_ip.setdefault(interface, {}).setdefault(family, {})
        family_dict.setdefault(address, {})[u'prefix_length'] = prefix
    return interfaces_ip
def get_mac_address_table(self):
    """Return the MAC address table."""
    # Select the table view matching the platform.
    if self.device.facts.get('personality', '') in ['SWITCH']:  # for EX & QFX devices
        if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']:  # for L2NG devices
            mac_view = junos_views.junos_mac_address_table_switch_l2ng(self.device)
        else:
            mac_view = junos_views.junos_mac_address_table_switch(self.device)
    else:
        mac_view = junos_views.junos_mac_address_table(self.device)
    mac_view.get()
    defaults = {
        'mac': u'',
        'interface': u'',
        'vlan': 0,
        'static': False,
        'active': True,
        'moves': 0,
        'last_move': 0.0
    }
    mac_address_table = []
    for _, fields in mac_view.items():
        entry = dict(defaults)
        entry.update({name: value for name, value in fields})
        mac = entry.get('mac')
        # JUNOS returns '*' for Type = Flood entries; skip those.
        if mac == '*':
            continue
        entry['mac'] = napalm_base.helpers.mac(mac)
        mac_address_table.append(entry)
    return mac_address_table
def get_route_to(self, destination='', protocol=''):
    """Return route details to a specific destination, learned from a certain protocol.

    :param destination: destination prefix to query.
    :param protocol: optional protocol filter; 'connected' is mapped
        to Junos' 'direct'.
    :raises TypeError: when destination is not a string.
    :raises CommandTimeoutException: when the query matches too many routes.
    :raises CommandErrorException: for unknown protocols or other RPC errors.
    """
    routes = {}

    if not isinstance(destination, py23_compat.string_types):
        raise TypeError('Please specify a valid destination!')

    if protocol and isinstance(destination, py23_compat.string_types):
        protocol = protocol.lower()

    if protocol == 'connected':
        protocol = 'direct'  # this is how is called on JunOS

    _COMMON_PROTOCOL_FIELDS_ = [
        'destination',
        'prefix_length',
        'protocol',
        'current_active',
        'last_active',
        'age',
        'next_hop',
        'outgoing_interface',
        'selected_next_hop',
        'preference',
        'inactive_reason',
        'routing_table'
    ]  # identifies the list of fileds common for all protocols

    _BOOLEAN_FIELDS_ = [
        'current_active',
        'selected_next_hop',
        'last_active'
    ]  # fields expected to have boolean values

    _PROTOCOL_SPECIFIC_FIELDS_ = {
        'bgp': [
            'local_as',
            'remote_as',
            'as_path',
            'communities',
            'local_preference',
            'preference2',
            'remote_address',
            'metric',
            'metric2'
        ],
        'isis': [
            'level',
            'metric',
            'local_as'
        ]
    }

    routes_table = junos_views.junos_protocol_route_table(self.device)

    rt_kargs = {
        'destination': destination
    }
    if protocol and isinstance(destination, py23_compat.string_types):
        rt_kargs['protocol'] = protocol

    try:
        routes_table.get(**rt_kargs)
    except RpcTimeoutError:
        # on devices with milions of routes
        # in case the destination is too generic (e.g.: 10/8)
        # will take very very long to determine all routes and
        # moreover will return a huge list
        raise CommandTimeoutException(
            'Too many routes returned! Please try with a longer prefix or a specific protocol!'
        )
    except RpcError as rpce:
        if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
            raise CommandErrorException(
                'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
        raise CommandErrorException(rpce)
    except Exception as err:
        raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))

    routes_items = routes_table.items()

    for route in routes_items:
        d = {}
        # next_hop = route[0]
        d = {elem[0]: elem[1] for elem in route[1]}
        destination = napalm_base.helpers.ip(d.pop('destination', ''))
        prefix_length = d.pop('prefix_length', 32)
        destination = '{d}/{p}'.format(
            d=destination,
            p=prefix_length
        )
        # Normalise missing boolean flags to False.
        d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
        as_path = d.get('as_path')
        if as_path is not None:
            d['as_path'] = as_path.split(' I ')[0]\
                .replace('AS path:', '')\
                .replace('I', '')\
                .strip()
            # to be sure that contains only AS Numbers
        if d.get('inactive_reason') is None:
            d['inactive_reason'] = u''
        route_protocol = d.get('protocol').lower()
        if protocol and protocol != route_protocol:
            continue
        communities = d.get('communities')
        if communities is not None and type(communities) is not list:
            d['communities'] = [communities]
        d_keys = list(d.keys())
        # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
        all_protocol_attributes = {
            key: d.pop(key)
            for key in d_keys
            if key not in _COMMON_PROTOCOL_FIELDS_
        }
        # keep only the attributes recognised for this protocol
        protocol_attributes = {
            key: value for key, value in all_protocol_attributes.items()
            if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
        }
        d['protocol_attributes'] = protocol_attributes
        if destination not in routes.keys():
            routes[destination] = []
        routes[destination].append(d)

    return routes
def get_snmp_information(self):
    """Return the SNMP configuration, including the community table."""
    snmp_information = {}
    snmp_config_view = junos_views.junos_snmp_config_table(self.device)
    snmp_config_view.get()
    config_items = snmp_config_view.items()
    if not config_items:
        return snmp_information
    # The view returns a single entry holding all top-level fields.
    snmp_information = {}
    for field, value in config_items[0][1]:
        snmp_information[py23_compat.text_type(field)] = value if value else ''
    snmp_information['community'] = {}
    communities_table = snmp_information.pop('communities_table')
    if not communities_table:
        return snmp_information
    for community_name, community_fields in communities_table.items():
        details = {'acl': ''}
        for field, value in community_fields:
            if field == 'mode':
                # Translate the Junos authorization mode into the
                # NAPALM vocabulary.
                value = C.SNMP_AUTHORIZATION_MODE_MAP.get(value)
            details[py23_compat.text_type(field)] = py23_compat.text_type(value)
        snmp_information['community'][py23_compat.text_type(community_name)] = details
    return snmp_information
def get_probes_config(self):
    """Return the configuration of the RPM probes.

    Output shape: {probe_name: {test_name: {'probe_type', 'target',
    'source', 'probe_count', 'test_interval'}}}.  Missing numeric fields
    default to 0, missing strings to ''.
    """
    probes = {}
    probes_table = junos_views.junos_rpm_probes_config_table(self.device)
    probes_table.get()
    probes_table_items = probes_table.items()
    for probe_test in probes_table_items:
        # probe_test is (test name, iterable of (field, value) pairs).
        test_name = py23_compat.text_type(probe_test[0])
        test_details = {
            p[0]: p[1] for p in probe_test[1]
        }
        # Pop the known fields, coercing type and applying defaults.
        probe_name = napalm_base.helpers.convert(
            py23_compat.text_type, test_details.pop('probe_name'))
        target = napalm_base.helpers.convert(
            py23_compat.text_type, test_details.pop('target', ''))
        test_interval = napalm_base.helpers.convert(int, test_details.pop('test_interval', '0'))
        probe_count = napalm_base.helpers.convert(int, test_details.pop('probe_count', '0'))
        probe_type = napalm_base.helpers.convert(
            py23_compat.text_type, test_details.pop('probe_type', ''))
        source = napalm_base.helpers.convert(
            py23_compat.text_type, test_details.pop('source_address', ''))
        if probe_name not in probes.keys():
            probes[probe_name] = {}
        probes[probe_name][test_name] = {
            'probe_type': probe_type,
            'target': target,
            'source': source,
            'probe_count': probe_count,
            'test_interval': test_interval
        }
    return probes
def get_probes_results(self):
    """Return the results of the RPM probes.

    Output shape: {probe_name: {test_name: {..result fields..}}}.
    Float fields are scaled from microseconds to milliseconds.
    """
    probes_results = {}
    probes_results_table = junos_views.junos_rpm_probes_results_table(self.device)
    probes_results_table.get()
    probes_results_items = probes_results_table.items()
    for probe_result in probes_results_items:
        probe_name = py23_compat.text_type(probe_result[0])
        test_results = {
            p[0]: p[1] for p in probe_result[1]
        }
        test_results['last_test_loss'] = napalm_base.helpers.convert(
            int, test_results.pop('last_test_loss'), 0)
        for test_param_name, test_param_value in test_results.items():
            if isinstance(test_param_value, float):
                # convert from useconds to mseconds
                test_results[test_param_name] = test_param_value * 1e-3
        test_name = test_results.pop('test_name', '')
        # Normalise an explicit null 'source' to an empty string.
        # NOTE(review): when the 'source' key is absent entirely, no
        # default is inserted — confirm callers tolerate a missing key.
        source = test_results.get('source', u'')
        if source is None:
            test_results['source'] = u''
        if probe_name not in probes_results.keys():
            probes_results[probe_name] = {}
        probes_results[probe_name][test_name] = test_results
    return probes_results
def traceroute(self,
               destination,
               source=C.TRACEROUTE_SOURCE,
               ttl=C.TRACEROUTE_TTL,
               timeout=C.TRACEROUTE_TIMEOUT,
               vrf=C.TRACEROUTE_VRF):
    """Execute traceroute and return results.

    Builds a CLI 'traceroute' command string and sends it as a raw
    <command> RPC over NETCONF, then parses <traceroute-results>.
    On failure returns {'error': ...}; on success returns
    {'success': {ttl: {'probes': {idx: {ip_address, host_name, rtt}}}}}.
    """
    traceroute_result = {}
    # calling form RPC does not work properly :(
    # but defined junos_route_instance_table just in case
    source_str = ''
    maxttl_str = ''
    wait_str = ''
    vrf_str = ''
    # Only append the optional CLI arguments that were actually given.
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        wait_str = ' wait {timeout}'.format(timeout=timeout)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
        destination=destination,
        source=source_str,
        maxttl=maxttl_str,
        wait=wait_str,
        vrf=vrf_str
    )
    traceroute_rpc = E('command', traceroute_command)
    # make direct RPC call via NETCONF
    rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
    traceroute_results = rpc_reply.find('.//traceroute-results')
    traceroute_failure = napalm_base.helpers.find_txt(
        traceroute_results, 'traceroute-failure', '')
    error_message = napalm_base.helpers.find_txt(
        traceroute_results, 'rpc-error/error-message', '')
    if traceroute_failure and error_message:
        return {'error': '{}: {}'.format(traceroute_failure, error_message)}
    traceroute_result['success'] = {}
    for hop in traceroute_results.findall('hop'):
        ttl_value = napalm_base.helpers.convert(
            int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
        if ttl_value not in traceroute_result['success']:
            traceroute_result['success'][ttl_value] = {'probes': {}}
        for probe in hop.findall('probe-result'):
            probe_index = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
            # '*' is the conventional placeholder for a non-responding hop.
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
            host_name = py23_compat.text_type(
                napalm_base.helpers.find_txt(probe, 'host-name', '*'))
            # rtt reported in microseconds; convert to milliseconds.
            rtt = napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3  # ms
            traceroute_result['success'][ttl_value]['probes'][probe_index] = {
                'ip_address': ip_address,
                'host_name': host_name,
                'rtt': rtt
            }
    return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
         timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
    """Execute ping on the device and return statistics.

    Builds a CLI 'ping' command string and sends it as a raw <command>
    RPC over NETCONF. Returns {'error': ...} on RPC failure or 100%
    packet loss, otherwise {'success': {...}} with rtt stats in ms and a
    per-probe 'results' list.
    """
    ping_dict = {}
    source_str = ''
    maxttl_str = ''
    timeout_str = ''
    size_str = ''
    count_str = ''
    vrf_str = ''
    # Only append the optional CLI arguments that were actually given.
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        timeout_str = ' wait {timeout}'.format(timeout=timeout)
    if size:
        size_str = ' size {size}'.format(size=size)
    if count:
        count_str = ' count {count}'.format(count=count)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
        destination=destination,
        source=source_str,
        ttl=maxttl_str,
        timeout=timeout_str,
        size=size_str,
        count=count_str,
        vrf=vrf_str
    )
    ping_rpc = E('command', ping_command)
    # make direct RPC call via NETCONF
    rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
    probe_summary = rpc_reply.find('.//probe-results-summary')
    if probe_summary is None:
        rpc_error = rpc_reply.find('.//rpc-error')
        return {'error': '{}'.format(
            napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
    packet_loss = napalm_base.helpers.convert(
        int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
    # rtt values are valid only if we get an ICMP reply.
    # BUGFIX: use equality, not `is not` — identity comparison with an int
    # literal only works via CPython small-int caching.
    if packet_loss != 100:
        ping_dict['success'] = {}
        ping_dict['success']['probes_sent'] = int(
            probe_summary.findtext("probes-sent"))
        ping_dict['success']['packet_loss'] = packet_loss
        # rtt values reported in microseconds; convert to milliseconds.
        ping_dict['success'].update({
            'rtt_min': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
            'rtt_max': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
            'rtt_avg': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-average'), -1) * 1e-3), 3),
            'rtt_stddev': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
        })
        tmp = rpc_reply.find('.//ping-results')
        results_array = []
        for probe_result in tmp.findall('probe-result'):
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip,
                napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
            rtt = round(
                (napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(
                        probe_result, 'rtt'), -1) * 1e-3), 3)
            results_array.append({'ip_address': ip_address,
                                  'rtt': rtt})
        ping_dict['success'].update({'results': results_array})
    else:
        return {'error': 'Packet loss {}'.format(packet_loss)}
    return ping_dict
def get_users(self):
    """Return the configuration of the users.

    Output: {username: {'level': int, 'password': str, 'sshkeys': list}}.
    """
    users = {}
    # Map Junos login classes onto Cisco-style privilege levels.
    _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = {
        'super-user': 15,
        'superuser': 15,
        'operator': 5,
        'read-only': 1,
        'unauthorized': 0
    }
    _DEFAULT_USER_DETAILS = {
        'level': 0,
        'password': '',
        'sshkeys': []
    }
    users_table = junos_views.junos_users_table(self.device)
    users_table.get()
    users_items = users_table.items()
    for user_entry in users_items:
        username = user_entry[0]
        user_details = _DEFAULT_USER_DETAILS.copy()
        # Only fields with truthy values override the defaults.
        user_details.update({
            d[0]: d[1] for d in user_entry[1] if d[1]
        })
        user_class = user_details.pop('class', '')
        user_details = {
            key: py23_compat.text_type(user_details[key])
            for key in user_details.keys()
        }
        # Unknown login classes fall back to level 0.
        level = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0)
        user_details.update({
            'level': level
        })
        # Collapse the per-algorithm key fields into a single list,
        # keeping only the keys actually present on this user.
        user_details['sshkeys'] = [
            user_details.pop(key)
            for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
            if user_details.get(key, '')
        ]
        users[username] = user_details
    return users
def get_optics(self):
    """Return optics (transceiver diagnostics) information.

    Output follows the OpenConfig-style shape:
    {interface: {'physical_channels': {'channel': [{...per-lane stats...}]}}}.
    Power/bias readings are per-lane; avg/min/max are not provided by the
    device and default to 0.0.
    """
    optics_table = junos_views.junos_intf_optics_table(self.device)
    optics_table.get()
    optics_items = optics_table.items()
    # optics_items has no lane information, so we need to re-format data
    # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
    # but the information for 40G/100G is incorrect at this point
    # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
    optics_items_with_lane = []
    for intf_optic_item in optics_items:
        temp_list = list(intf_optic_item)
        temp_list.insert(1, u"0")
        new_intf_optic_item = tuple(temp_list)
        optics_items_with_lane.append(new_intf_optic_item)
    # Now optics_items_with_lane has all optics with lane 0 included
    # Example: ('xe-0/0/0', u'0', [ optical_values ])
    # Get optical information for 40G/100G optics
    optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
    optics_table40G.get()
    optics_40Gitems = optics_table40G.items()
    # Re-format data as before inserting lane value
    new_optics_40Gitems = []
    for item in optics_40Gitems:
        lane = item[0]
        iface = item[1].pop(0)
        new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
    # New_optics_40Gitems contains 40G/100G optics only:
    # ('et-0/0/49', u'0', [ optical_values ]),
    # ('et-0/0/49', u'1', [ optical_values ]),
    # ('et-0/0/49', u'2', [ optical_values ])
    # Remove 40G/100G optics entries with wrong information returned
    # from junos_intf_optics_table()
    iface_40G = [item[0] for item in new_optics_40Gitems]
    for intf_optic_item in optics_items_with_lane:
        iface_name = intf_optic_item[0]
        if iface_name not in iface_40G:
            new_optics_40Gitems.append(intf_optic_item)
    # New_optics_40Gitems contains all optics 10G/40G/100G with the lane
    optics_detail = {}
    for intf_optic_item in new_optics_40Gitems:
        lane = intf_optic_item[1]
        interface_name = py23_compat.text_type(intf_optic_item[0])
        optics = dict(intf_optic_item[2])
        if interface_name not in optics_detail:
            optics_detail[interface_name] = {}
            optics_detail[interface_name]['physical_channels'] = {}
            optics_detail[interface_name]['physical_channels']['channel'] = []
        # Defaulting avg, min, max values to 0.0 since device does not
        # return these values
        intf_optics = {
            'index': int(lane),
            'state': {
                'input_power': {
                    'instant': (
                        float(optics['input_power'])
                        if optics['input_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'output_power': {
                    'instant': (
                        float(optics['output_power'])
                        if optics['output_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'laser_bias_current': {
                    'instant': (
                        float(optics['laser_bias_current'])
                        if optics['laser_bias_current'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                }
            }
        }
        optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
    return optics_detail
def get_config(self, retrieve='all'):
    """Return the device configuration as text.

    :param retrieve: 'running', 'candidate', 'startup' or 'all'.
    The 'startup' entry is always returned empty — this driver never
    populates it.
    """
    result = {key: '' for key in ('startup', 'running', 'candidate')}

    def _fetch(database):
        # One <get-configuration> RPC in text format against the given DB.
        opts = {'format': 'text', 'database': database}
        reply = self.device.rpc.get_config(filter_xml=None, options=opts)
        return py23_compat.text_type(reply.text)

    if retrieve in ('candidate', 'all'):
        result['candidate'] = _fetch('candidate')
    if retrieve in ('running', 'all'):
        result['running'] = _fetch('committed')
    return result
def get_network_instances(self, name=''):
    """Return routing-instance (VRF) details, keyed by instance name.

    :param name: when non-empty, return only that instance (or {} if
        it does not exist).
    A synthetic 'default' instance is added containing every interface
    not assigned to an explicit routing instance.
    """
    network_instances = {}
    ri_table = junos_views.junos_nw_instances_table(self.device)
    ri_table.get()
    ri_entries = ri_table.items()
    vrf_interfaces = []
    for ri_entry in ri_entries:
        ri_name = py23_compat.text_type(ri_entry[0])
        ri_details = {
            d[0]: d[1] for d in ri_entry[1]
        }
        ri_type = ri_details['instance_type']
        if ri_type is None:
            ri_type = 'default'
        ri_rd = ri_details['route_distinguisher']
        ri_interfaces = ri_details['interfaces']
        # A single-interface instance comes back as a scalar, not a list.
        if not isinstance(ri_interfaces, list):
            ri_interfaces = [ri_interfaces]
        network_instances[ri_name] = {
            'name': ri_name,
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
            'state': {
                'route_distinguisher': ri_rd if ri_rd else ''
            },
            'interfaces': {
                'interface': {
                    intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                }
            }
        }
        vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
    # Everything not claimed by an explicit instance goes into 'default'.
    all_interfaces = self.get_interfaces().keys()
    default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
    if 'default' not in network_instances:
        network_instances['default'] = {
            'name': 'default',
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
            'state': {
                'route_distinguisher': ''
            },
            'interfaces': {
                'interface': {
                    py23_compat.text_type(intrf_name): {}
                    for intrf_name in default_interfaces
                }
            }
        }
    if not name:
        return network_instances
    if name not in network_instances:
        return {}
    return {name: network_instances[name]}
def get_ntp_peers(self):
    """Return the NTP peers configured on the device.

    Output: {peer_ip: {}} — the inner dict carries no details yet.
    (Reconstructed: the original span had extraction metadata fused
    into the code.)
    """
    ntp_table = junos_views.junos_ntp_peers_config_table(self.device)
    ntp_table.get()
    ntp_peers = ntp_table.items()
    if not ntp_peers:
        return {}
    return {napalm_base.helpers.ip(peer[0]): {} for peer in ntp_peers}


class JunOSDriver(NetworkDriver):
    """JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
    """
    Initialise JunOS driver.

    :param hostname: device IP address or FQDN
    :param username: SSH/NETCONF username
    :param password: SSH/NETCONF password (or key passphrase when a key is used)
    :param timeout: RPC timeout in seconds (default 60)
    :param optional_args: driver-specific options, all optional:

    * config_lock (True/False): lock configuration DB after the connection is established.
    * port (int): custom port
    * key_file (string): SSH key file path
    * keepalive (int): Keepalive interval
    * ignore_warning (boolean): not generate warning exceptions
    """
    self.hostname = hostname
    self.username = username
    self.password = password
    self.timeout = timeout
    self.config_replace = False  # set per load: True = replace, False = merge
    self.locked = False  # tracks whether we currently hold the config DB lock
    # Get optional arguments
    if optional_args is None:
        optional_args = {}
    self.config_lock = optional_args.get('config_lock', False)
    self.port = optional_args.get('port', 22)
    self.key_file = optional_args.get('key_file', None)
    self.keepalive = optional_args.get('keepalive', 30)
    self.ssh_config_file = optional_args.get('ssh_config_file', None)
    self.ignore_warning = optional_args.get('ignore_warning', False)
    if self.key_file:
        # Key-based authentication; the connection is opened later in open().
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             ssh_private_key_file=self.key_file,
                             ssh_config=self.ssh_config_file,
                             port=self.port)
    else:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             port=self.port,
                             ssh_config=self.ssh_config_file)
    self.profile = ["junos"]  # NAPALM profile identifier for this driver
def open(self):
    """Open the connection with the device.

    Raises ConnectionException when the NETCONF session cannot be
    established within the connect timeout.
    """
    try:
        self.device.open()
    except ConnectTimeoutError as cte:
        # BUGFIX: exceptions have no `.message` attribute on Python 3;
        # stringify the exception instead.
        raise ConnectionException(py23_compat.text_type(cte))
    self.device.timeout = self.timeout
    self.device._conn._session.transport.set_keepalive(self.keepalive)
    if hasattr(self.device, "cu"):
        # make sure to remove the cu attr from previous session
        # ValueError: requested attribute name cu already exists
        del self.device.cu
    self.device.bind(cu=Config)
    if self.config_lock:
        self._lock()
def close(self):
    """Terminate the NETCONF session, releasing the config lock if held."""
    if self.config_lock:
        self._unlock()
    self.device.close()
def _lock(self):
"""Lock the config DB."""
if not self.locked:
self.device.cu.lock()
self.locked = True
def _unlock(self):
"""Unlock the config DB."""
if self.locked:
self.device.cu.unlock()
self.locked = False
def _rpc(self, get, child=None, **kwargs):
    """
    This allows you to construct an arbitrary RPC call to retreive common stuff. For example:
    Configuration: get: "<get-configuration/>"
    Interface information: get: "<get-interface-information/>"
    A particular interfacece information:
        get: "<get-interface-information/>"
        child: "<interface-name>ge-0/0/0</interface-name>"

    :param get: XML string of the RPC element to execute.
    :param child: optional XML string appended as a child of the RPC.
    :param kwargs: accepted for signature compatibility but unused here.
    :return: the RPC reply serialised back to an XML byte string.
    """
    rpc = etree.fromstring(get)
    if child:
        rpc.append(etree.fromstring(child))
    response = self.device.execute(rpc)
    return etree.tostring(response)
def is_alive(self):
    """Report whether the SSH transport and the NETCONF session are both up."""
    # evaluate the state of the underlying SSH connection
    # and also the NETCONF status from PyEZ
    transport_up = self.device._conn._session.transport.is_active()
    return {'is_alive': transport_up and self.device.connected}
@staticmethod
def _is_json_format(config):
try:
_ = json.loads(config) # noqa
except (TypeError, ValueError):
return False
return True
def _detect_config_format(self, config):
fmt = 'text'
set_action_matches = [
'set',
'activate',
'deactivate',
'annotate',
'copy',
'delete',
'insert',
'protect',
'rename',
'unprotect',
]
if config.strip().startswith('<'):
return 'xml'
elif config.strip().split(' ')[0] in set_action_matches:
return 'set'
elif self._is_json_format(config):
return 'json'
return fmt
def _load_candidate(self, filename, config, overwrite):
    """Load a candidate configuration from a file or a string.

    :param filename: path to a config file; takes precedence over *config*.
    :param config: configuration text/XML when no filename is given.
    :param overwrite: True to replace the whole config, False to merge.
    :raises ReplaceConfigException / MergeConfigException: on load errors,
        depending on the active mode (``self.config_replace``).
    """
    if filename is None:
        configuration = config
    else:
        with open(filename) as f:
            configuration = f.read()
    if not self.config_lock:
        # if not locked during connection time
        # will try to lock it if not already aquired
        self._lock()
        # and the device will be locked till first commit/rollback
    try:
        # Auto-detect the payload format; XML must be parsed before load().
        fmt = self._detect_config_format(configuration)
        if fmt == "xml":
            configuration = etree.XML(configuration)
        self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                            ignore_warning=self.ignore_warning)
    except ConfigLoadError as e:
        # Re-raise with the exception type matching the requested operation.
        if self.config_replace:
            raise ReplaceConfigException(e.errs)
        else:
            raise MergeConfigException(e.errs)
def load_replace_candidate(self, filename=None, config=None):
    """Load a candidate configuration that fully replaces the current one."""
    # Mode flag must be set before loading: the load error handler reads it.
    self.config_replace = True
    self._load_candidate(filename, config, True)
def load_merge_candidate(self, filename=None, config=None):
    """Load a candidate configuration merged on top of the current one."""
    # Mode flag must be set before loading: the load error handler reads it.
    self.config_replace = False
    self._load_candidate(filename, config, False)
def compare_config(self):
    """Return the diff between candidate and running config ('' when none)."""
    diff = self.device.cu.diff()
    return '' if diff is None else diff.strip()
def commit_config(self):
    """Commit the loaded candidate configuration.

    The configuration DB is unlocked afterwards unless it was locked
    for the whole session (``config_lock`` optional argument).
    """
    self.device.cu.commit(ignore_warning=self.ignore_warning)
    if not self.config_lock:
        self._unlock()
def discard_config(self):
    """Drop the candidate changes by rolling back to the active config (rollback 0)."""
    self.device.cu.rollback(rb_id=0)
    if not self.config_lock:
        self._unlock()
def rollback(self):
    """Revert to the previously committed configuration and commit it."""
    self.device.cu.rollback(rb_id=1)
    self.commit_config()
def get_facts(self):
    """Return general facts of the device.

    Output keys: vendor, model, serial_number, os_version, hostname,
    fqdn, uptime (seconds, -1 when unknown), interface_list.
    """
    output = self.device.facts
    # PyEZ may report a falsy uptime; NAPALM's convention for unknown is -1.
    uptime = self.device.uptime or -1
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    interface_list = interfaces.keys()
    return {
        'vendor': u'Juniper',
        'model': py23_compat.text_type(output['model']),
        'serial_number': py23_compat.text_type(output['serialnumber']),
        'os_version': py23_compat.text_type(output['version']),
        'hostname': py23_compat.text_type(output['hostname']),
        'fqdn': py23_compat.text_type(output['fqdn']),
        'uptime': uptime,
        'interface_list': interface_list
    }
def get_interfaces(self):
    """Return interfaces details.

    Output: {iface: {is_up, is_enabled, description, last_flapped,
    mac_address, speed}} — speed is in Mbps, -1 when unparsable.
    """
    result = {}
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    # convert all the tuples to our pre-defined dict structure
    for iface in interfaces.keys():
        result[iface] = {
            'is_up': interfaces[iface]['is_up'],
            'is_enabled': interfaces[iface]['is_enabled'],
            'description': (interfaces[iface]['description'] or u''),
            'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
            'mac_address': napalm_base.helpers.convert(
                napalm_base.helpers.mac,
                interfaces[iface]['mac_address'],
                py23_compat.text_type(interfaces[iface]['mac_address'])),
            'speed': -1
        }
        # result[iface]['last_flapped'] = float(result[iface]['last_flapped'])
        # Parse speeds such as '1000mbps' or '10Gbps' into Mbps.
        match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
        if match is None:
            continue
        speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
        if speed_value == -1:
            continue
        speed_unit = match.group(2)
        if speed_unit.lower() == 'gbps':
            speed_value *= 1000
        result[iface]['speed'] = speed_value
    return result
def get_interfaces_counters(self):
    """Return per-interface traffic counters; missing values become -1."""
    counters_table = junos_views.junos_iface_counter_table(self.device)
    counters_table.get()
    result = {}
    for ifname, counters in counters_table.items():
        result[ifname] = {
            field: -1 if value is None else value
            for field, value in counters
        }
    return result
def get_environment(self):
    """Return environment details (power, fans, temperature, cpu, memory)."""
    environment = junos_views.junos_enviroment_table(self.device)
    routing_engine = junos_views.junos_routing_engine_table(self.device)
    temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
    power_supplies = junos_views.junos_pem_table(self.device)
    environment.get()
    routing_engine.get()
    temperature_thresholds.get()
    environment_data = {}
    current_class = None
    for sensor_object, object_data in environment.items():
        structured_object_data = {k: v for k, v in object_data}
        if structured_object_data['class']:
            # If current object has a 'class' defined, store it for use
            # on subsequent unlabeled lines.
            current_class = structured_object_data['class']
        else:
            # Juniper doesn't label the 2nd+ lines of a given class with a
            # class name. In that case, we use the most recent class seen.
            structured_object_data['class'] = current_class
        if structured_object_data['class'] == 'Power':
            # Create a dict for the 'power' key
            try:
                environment_data['power'][sensor_object] = {}
            except KeyError:
                environment_data['power'] = {}
                environment_data['power'][sensor_object] = {}
            # Capacity/output default to -1.0; corrected from PEM data below.
            environment_data['power'][sensor_object]['capacity'] = -1.0
            environment_data['power'][sensor_object]['output'] = -1.0
        if structured_object_data['class'] == 'Fans':
            # Create a dict for the 'fans' key
            try:
                environment_data['fans'][sensor_object] = {}
            except KeyError:
                environment_data['fans'] = {}
                environment_data['fans'][sensor_object] = {}
        status = structured_object_data['status']
        env_class = structured_object_data['class']
        if (status == 'OK' and env_class == 'Power'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['power'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Power'):
            environment_data['power'][sensor_object]['status'] = False
        elif (status == 'OK' and env_class == 'Fans'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['fans'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Fans'):
            environment_data['fans'][sensor_object]['status'] = False
        # NOTE(review): this inner loop conditions on the *outer* loop's
        # structured_object_data/sensor_object rather than
        # temperature_object/temperature_data; only the alarm thresholds
        # come from the inner loop. Preserved as-is — confirm against
        # real device output before changing.
        for temperature_object, temperature_data in temperature_thresholds.items():
            structured_temperature_data = {k: v for k, v in temperature_data}
            if structured_object_data['class'] == 'Temp':
                # Create a dict for the 'temperature' key
                try:
                    environment_data['temperature'][sensor_object] = {}
                except KeyError:
                    environment_data['temperature'] = {}
                    environment_data['temperature'][sensor_object] = {}
                # Check we have a temperature field in this class (See #66)
                if structured_object_data['temperature']:
                    environment_data['temperature'][sensor_object]['temperature'] = \
                        float(structured_object_data['temperature'])
                # Set a default value (False) to the key is_critical and is_alert
                environment_data['temperature'][sensor_object]['is_alert'] = False
                environment_data['temperature'][sensor_object]['is_critical'] = False
                # Check if the working temperature is equal to or higher than alerting threshold
                temp = structured_object_data['temperature']
                if structured_temperature_data['red-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_critical'] = True
                    environment_data['temperature'][sensor_object]['is_alert'] = True
                elif structured_temperature_data['yellow-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_alert'] = True
    # Try to correct Power Supply information
    pem_table = dict()
    try:
        power_supplies.get()
    except RpcError:
        # Not all platforms have support for this
        pass
    else:
        # Format PEM information and correct capacity and output values
        for pem in power_supplies.items():
            pem_name = pem[0].replace("PEM", "Power Supply")
            pem_table[pem_name] = dict(pem[1])
            environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
            environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
    for routing_engine_object, routing_engine_data in routing_engine.items():
        structured_routing_engine_data = {k: v for k, v in routing_engine_data}
        # Create dicts for 'cpu' and 'memory'.
        try:
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        except KeyError:
            environment_data['cpu'] = {}
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        # Calculate the CPU usage by using the CPU idle value.
        environment_data['cpu'][routing_engine_object]['%usage'] = \
            100.0 - structured_routing_engine_data['cpu-idle']
        try:
            environment_data['memory']['available_ram'] = \
                int(structured_routing_engine_data['memory-dram-size'])
        except ValueError:
            # Some platforms report e.g. '2048 MB' — keep digits only.
            environment_data['memory']['available_ram'] = \
                int(
                    ''.join(
                        i for i in structured_routing_engine_data['memory-dram-size']
                        if i.isdigit()
                    )
                )
        # Junos gives us RAM in %, so calculation has to be made.
        # Sadly, because of this, results are not 100% accurate to the truth.
        environment_data['memory']['used_ram'] = \
            int(round(environment_data['memory']['available_ram'] / 100.0 *
                      structured_routing_engine_data['memory-buffer-utilization']))
    return environment_data
@staticmethod
def _get_address_family(table):
"""
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
"""
address_family_mapping = {
'inet': 'ipv4',
'inet6': 'ipv6',
'inetflow': 'flow'
}
family = table.split('.')[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = family
return address_family
def _parse_route_stats(self, neighbor):
    """Build per-address-family prefix counters for one BGP neighbor.

    :param neighbor: dict of fields for the neighbor (mutated: list
        coercion of 'sent_prefixes').
    :return: {family: {received_prefixes, accepted_prefixes,
        sent_prefixes}}; counters stay -1 when the session is down.
    """
    data = {
        'ipv4': {
            'received_prefixes': -1,
            'accepted_prefixes': -1,
            'sent_prefixes': -1
        },
        'ipv6': {
            'received_prefixes': -1,
            'accepted_prefixes': -1,
            'sent_prefixes': -1
        }
    }
    if not neighbor['is_up']:
        return data
    elif isinstance(neighbor['tables'], list):
        if isinstance(neighbor['sent_prefixes'], int):
            # We expect sent_prefixes to be a list, but sometimes it
            # is of type int. Therefore convert attribute to list
            neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
        for idx, table in enumerate(neighbor['tables']):
            family = self._get_address_family(table)
            data[family] = {}
            data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
            data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
            # sent counters only exist for tables that are 'in sync';
            # consume them in order from the sent_prefixes list.
            if 'in sync' in neighbor['send-state'][idx]:
                data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
            else:
                data[family]['sent_prefixes'] = 0
    else:
        # Single-table neighbor: values are scalars, not lists.
        family = self._get_address_family(neighbor['tables'])
        data[family] = {}
        data[family]['received_prefixes'] = neighbor['received_prefixes']
        data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
        data[family]['sent_prefixes'] = neighbor['sent_prefixes']
    return data
@staticmethod
def _parse_value(value):
    """Normalise a raw table value: text -> unicode, None -> '', else as-is."""
    if value is None:
        return u''
    if isinstance(value, py23_compat.string_types):
        return py23_compat.text_type(value)
    return value
def get_bgp_neighbors(self):
    """Return BGP neighbors details, grouped by routing instance.

    Output: {instance: {'router_id': ..., 'peers': {ip: {...}}}}.
    Instances without peers are filtered out of the result.
    """
    bgp_neighbor_data = {}
    default_neighbor_details = {
        'local_as': 0,
        'remote_as': 0,
        'remote_id': '',
        'is_up': False,
        'is_enabled': False,
        'description': '',
        'uptime': 0,
        'address_family': {}
    }
    keys = default_neighbor_details.keys()
    uptime_table = junos_views.junos_bgp_uptime_table(self.device)
    bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
    uptime_table_lookup = {}

    def _get_uptime_table(instance):
        # Memoised per-instance fetch of the uptime table.
        if instance not in uptime_table_lookup:
            uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
        return uptime_table_lookup[instance]

    def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
        '''
        Make sure to execute a simple request whenever using
        junos > 13. This is a helper used to avoid code redundancy
        and reuse the function also when iterating through the list
        BGP neighbors under a specific routing instance,
        also when the device is capable to return the routing
        instance name at the BGP neighbor level.
        '''
        for bgp_neighbor in neighbor_data:
            # Keys look like '1.2.3.4+179'; strip the port suffix.
            peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # not instance, means newer Junos version,
                # as we request everything in a single request
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                # instance is explicitly requests,
                # thus it's an old Junos, so we retrieve the BGP neighbors
                # under a certain routing instance
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            if instance_name not in bgp_neighbor_data:
                bgp_neighbor_data[instance_name] = {}
            if 'router_id' not in bgp_neighbor_data[instance_name]:
                # we only need to set this once
                bgp_neighbor_data[instance_name]['router_id'] = \
                    py23_compat.text_type(neighbor_details.get('local_id', ''))
            # Keep only the keys defined in the default template.
            peer = {
                key: self._parse_value(value)
                for key, value in neighbor_details.items()
                if key in keys
            }
            peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
            peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
            peer['address_family'] = self._parse_route_stats(neighbor_details)
            if 'peers' not in bgp_neighbor_data[instance_name]:
                bgp_neighbor_data[instance_name]['peers'] = {}
            bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
            if not uptime_table_items:
                uptime_table_items = _get_uptime_table(instance)
            for neighbor, uptime in uptime_table_items:
                if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                    bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]

    # Commenting out the following sections, till Junos
    # will provide a way to identify the routing instance name
    # from the details of the BGP neighbor
    # currently, there are Junos 15 version having a field called `peer_fwd_rti`
    # but unfortunately, this is not consistent.
    # Junos 17 might have this fixed, but this needs to be revisited later.
    # In the definition below, `old_junos` means a version that does not provide
    # the forwarding RTI information.
    #
    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device).get()
    for instance, instance_data in instances.items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        bgp_neighbor_data[instance] = {'peers': {}}
        instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
        uptime_table_items = uptime_table.get(instance=instance).items()
        _get_bgp_neighbors_core(instance_neighbors,
                                instance=instance,
                                uptime_table_items=uptime_table_items)
    # If the OS provides the `peer_fwd_rti` or any way to identify the
    # rotuing instance name (see above), the performances of this getter
    # can be significantly improved, as we won't execute one request
    # for each an every RT.
    # However, this improvement would only be beneficial for multi-VRF envs.
    #
    # else:
    #     instance_neighbors = bgp_neighbors_table.get().items()
    #     _get_bgp_neighbors_core(instance_neighbors)
    # Drop instances that ended up with no peers.
    bgp_tmp_dict = {}
    for k, v in bgp_neighbor_data.items():
        if bgp_neighbor_data[k]['peers']:
            bgp_tmp_dict[k] = v
    return bgp_tmp_dict
def get_lldp_neighbors(self):
    """Return LLDP neighbors, keyed by local interface.

    Returns {} (after logging) when the device rejects the LLDP RPC,
    e.g. when LLDP is not configured.
    """
    lldp = junos_views.junos_lldp_table(self.device)
    try:
        lldp.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    result = lldp.items()
    neighbors = {}
    # Group the (field, value) pairs of each entry under its local port.
    for neigh in result:
        if neigh[0] not in neighbors.keys():
            neighbors[neigh[0]] = []
        neighbors[neigh[0]].append({x[0]: py23_compat.text_type(x[1]) for x in neigh[1]})
    return neighbors
def get_lldp_neighbors_detail(self, interface=''):
"""Detailed view of the LLDP neighbors."""
lldp_neighbors = {}
lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
try:
lldp_table.get()
except RpcError as rpcerr:
# this assumes the library runs in an environment
# able to handle logs
# otherwise, the user just won't see this happening
log.error('Unable to retrieve the LLDP neighbors information:')
log.error(rpcerr.message)
return {}
interfaces = lldp_table.get().keys()
# get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
# and SRX Series is get-lldp-interface-neighbors-information,
# and rpc for M, MX, and T Series is get-lldp-interface-neighbors
# ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
# ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
for interface in interfaces:
if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
lldp_table.get(interface_name=interface)
else:
lldp_table.get(interface_device=interface)
for item in lldp_table:
if interface not in lldp_neighbors.keys():
lldp_neighbors[interface] = []
lldp_neighbors[interface].append({
'parent_interface': item.parent_interface,
'remote_port': item.remote_port,
'remote_chassis_id': napalm_base.helpers.convert(
napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
'remote_port_description': napalm_base.helpers.convert(
py23_compat.text_type, item.remote_port_description),
'remote_system_name': item.remote_system_name,
'remote_system_description': item.remote_system_description,
'remote_system_capab': item.remote_system_capab,
'remote_system_enable_capab': item.remote_system_enable_capab
})
return lldp_neighbors
    def cli(self, commands):
        """
        Execute raw CLI commands and returns their output.

        :param commands: list of command strings to run on the device.
        :return: dict mapping each original command string to its text output.
        :raises TypeError: if ``commands`` is not a list.

        Output-pipe modifiers (``match``, ``except``, ``count``, ``trim``,
        ``last``, ``find``) are emulated in Python; ``save`` is stripped
        from the command before it is sent to the device.
        """
        cli_output = {}

        def _count(txt, none):  # Second arg for consistency only. noqa
            '''
            Return the exact output, as Junos displays
            e.g.:
            > show system processes extensive | match root | count
            Count: 113 lines
            '''
            count = len(txt.splitlines())
            return 'Count: {count} lines'.format(count=count)

        def _trim(txt, length):
            '''
            Trim specified number of columns from start of line.
            '''
            try:
                newlines = []
                for line in txt.splitlines():
                    newlines.append(line[int(length):])
                return '\n'.join(newlines)
            except ValueError:
                # non-numeric length: return the text untouched
                return txt

        def _except(txt, pattern):
            '''
            Show only text that does not match a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            unmatched = [
                line for line in txt.splitlines()
                if not re.search(rgx, line, re.I)
            ]
            return '\n'.join(unmatched)

        def _last(txt, length):
            '''
            Display end of output only.
            '''
            try:
                return '\n'.join(
                    txt.splitlines()[(-1)*int(length):]
                )
            except ValueError:
                # non-numeric length: return the text untouched
                return txt

        def _match(txt, pattern):
            '''
            Show only text that matches a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            matched = [
                line for line in txt.splitlines()
                if re.search(rgx, line, re.I)
            ]
            return '\n'.join(matched)

        def _find(txt, pattern):
            '''
            Search for first occurrence of pattern.
            '''
            rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
            match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
            if match:
                return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
            else:
                return '\nPattern not found'

        def _process_pipe(cmd, txt):
            '''
            Process CLI output from Juniper device that
            doesn't allow piping the output.
            '''
            # NOTE(review): this short-circuits whenever the device already
            # returned text, so the Python emulation below only runs when
            # ``txt`` is None — confirm this is the intended contract.
            if txt is not None:
                return txt
            _OF_MAP = OrderedDict()
            _OF_MAP['except'] = _except
            _OF_MAP['match'] = _match
            _OF_MAP['last'] = _last
            _OF_MAP['trim'] = _trim
            _OF_MAP['count'] = _count
            _OF_MAP['find'] = _find
            # the operations order matter in this case!
            exploded_cmd = cmd.split('|')
            pipe_oper_args = {}
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0]  # always there
                pipe_args = ''.join(exploded_pipe[1:2])
                # will not throw error when there's no arg
                pipe_oper_args[pipe_oper] = pipe_args
            for oper in _OF_MAP.keys():
                # to make sure the operation sequence is correct
                if oper not in pipe_oper_args.keys():
                    continue
                txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
            return txt

        if not isinstance(commands, list):
            raise TypeError('Please enter a valid list of commands!')
        _PIPE_BLACKLIST = ['save']
        # Preprocessing to avoid forbidden commands
        for command in commands:
            exploded_cmd = command.split('|')
            command_safe_parts = []
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0]  # always there
                if pipe_oper in _PIPE_BLACKLIST:
                    continue
                pipe_args = ''.join(exploded_pipe[1:2])
                safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                                  args=pipe_args)
                command_safe_parts.append(safe_pipe)
            safe_command = exploded_cmd[0] if not command_safe_parts else\
                '{base} | {pipes}'.format(base=exploded_cmd[0],
                                          pipes=' | '.join(command_safe_parts))
            raw_txt = self.device.cli(safe_command, warning=False)
            # the dict key is the ORIGINAL command, including stripped pipes
            cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
                _process_pipe(command, raw_txt))
        return cli_output
    def get_bgp_config(self, group='', neighbor=''):
        """
        Return BGP configuration.

        :param group: if set, retrieve only this BGP group's configuration.
        :param neighbor: if set (only honoured together with ``group``),
            retrieve only this neighbor's configuration within the group.
        :return: dict keyed by group name; each group holds its attributes,
            a nested ``prefix_limit`` dict and a ``neighbors`` dict keyed
            by peer address.
        """
        def update_dict(d, u):  # for deep dictionary update
            # NOTE(review): ``collections.Mapping`` is deprecated and removed
            # in Python 3.10+; should become ``collections.abc.Mapping``.
            for k, v in u.items():
                if isinstance(d, collections.Mapping):
                    if isinstance(v, collections.Mapping):
                        r = update_dict(d.get(k, {}), v)
                        d[k] = r
                    else:
                        d[k] = u[k]
                else:
                    d = {k: u[k]}
            return d

        def build_prefix_limit(**args):
            """
            Transform the elements of a dictionary into nested dictionaries.

            Example:
                {
                    'inet_unicast_limit': 500,
                    'inet_unicast_teardown_threshold': 95,
                    'inet_unicast_teardown_timeout': 5
                }
            becomes:
                {
                    'inet': {
                        'unicast': {
                            'limit': 500,
                            'teardown': {
                                'threshold': 95,
                                'timeout': 5
                            }
                        }
                    }
                }
            """
            prefix_limit = {}
            for key, value in args.items():
                # split 'inet_unicast_teardown_threshold' into nesting levels
                key_levels = key.split('_')
                length = len(key_levels)-1
                temp_dict = {
                    key_levels[length]: value
                }
                # wrap the innermost value outward, one level per key part
                for index in reversed(range(length)):
                    level = key_levels[index]
                    temp_dict = {level: temp_dict}
                update_dict(prefix_limit, temp_dict)
            return prefix_limit

        # datatype of each field shared by groups and peers; used both for
        # conversion and for choosing the empty default per field
        _COMMON_FIELDS_DATATYPE_ = {
            'description': py23_compat.text_type,
            'local_address': py23_compat.text_type,
            'local_as': int,
            'remote_as': int,
            'import_policy': py23_compat.text_type,
            'export_policy': py23_compat.text_type,
            'inet_unicast_limit_prefix_limit': int,
            'inet_unicast_teardown_threshold_prefix_limit': int,
            'inet_unicast_teardown_timeout_prefix_limit': int,
            'inet_unicast_novalidate_prefix_limit': int,
            'inet_flow_limit_prefix_limit': int,
            'inet_flow_teardown_threshold_prefix_limit': int,
            'inet_flow_teardown_timeout_prefix_limit': int,
            'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
            'inet6_unicast_limit_prefix_limit': int,
            'inet6_unicast_teardown_threshold_prefix_limit': int,
            'inet6_unicast_teardown_timeout_prefix_limit': int,
            'inet6_unicast_novalidate_prefix_limit': int,
            'inet6_flow_limit_prefix_limit': int,
            'inet6_flow_teardown_threshold_prefix_limit': int,
            'inet6_flow_teardown_timeout_prefix_limit': int,
            'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
        }

        _PEER_FIELDS_DATATYPE_MAP_ = {
            'authentication_key': py23_compat.text_type,
            'route_reflector_client': bool,
            'nhs': bool
        }
        _PEER_FIELDS_DATATYPE_MAP_.update(
            _COMMON_FIELDS_DATATYPE_
        )

        _GROUP_FIELDS_DATATYPE_MAP_ = {
            'type': py23_compat.text_type,
            'apply_groups': list,
            'remove_private_as': bool,
            'multipath': bool,
            'multihop_ttl': int
        }
        _GROUP_FIELDS_DATATYPE_MAP_.update(
            _COMMON_FIELDS_DATATYPE_
        )

        # neutral default value per datatype
        _DATATYPE_DEFAULT_ = {
            py23_compat.text_type: '',
            int: 0,
            bool: False,
            list: []
        }

        bgp_config = {}
        if group:
            bgp = junos_views.junos_bgp_config_group_table(self.device)
            bgp.get(group=group)
        else:
            bgp = junos_views.junos_bgp_config_table(self.device)
            bgp.get()
            neighbor = ''  # if no group is set, no neighbor should be set either
        bgp_items = bgp.items()
        if neighbor:
            neighbor_ip = napalm_base.helpers.ip(neighbor)

        for bgp_group in bgp_items:
            bgp_group_name = bgp_group[0]
            bgp_group_details = bgp_group[1]
            # seed the group with the datatype defaults for all non-prefix-limit fields
            bgp_config[bgp_group_name] = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                # first pass: non-prefix-limit fields with actual values
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    # Junos may return a list of policies; flatten to one string
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                if key == 'neighbors':
                    # stash the nested peers table for the second stage below
                    bgp_group_peers = value
                    continue
                bgp_config[bgp_group_name].update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                # second pass: collect the flat *_prefix_limit fields
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            if 'multihop' in bgp_config[bgp_group_name].keys():
                # Delete 'multihop' key from the output
                del bgp_config[bgp_group_name]['multihop']
                if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                    # Set ttl to default value 64
                    bgp_config[bgp_group_name]['multihop_ttl'] = 64

            bgp_config[bgp_group_name]['neighbors'] = {}
            # NOTE(review): ``bgp_group_peers`` is only bound when the group
            # had a 'neighbors' element above — a group without neighbors
            # would raise NameError here; confirm the view always emits it.
            for bgp_group_neighbor in bgp_group_peers.items():
                bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
                if neighbor and bgp_peer_address != neighbor:
                    continue  # if filters applied, jump over all other neighbors
                bgp_group_details = bgp_group_neighbor[1]
                # seed the peer with datatype defaults, as for the group
                bgp_peer_details = {
                    field: _DATATYPE_DEFAULT_.get(datatype)
                    for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                    if '_prefix_limit' not in field
                }
                for elem in bgp_group_details:
                    if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                        continue
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    key = elem[0]
                    value = elem[1]
                    if key in ['export_policy', 'import_policy']:
                        # Junos might return a list
                        if isinstance(value, list):
                            value = ' '.join(value)
                    if key == 'local_address':
                        value = napalm_base.helpers.convert(
                            napalm_base.helpers.ip, value, value)
                    bgp_peer_details.update({
                        key: napalm_base.helpers.convert(datatype, value, default)
                    })
                    # normalise ASNs (may be asplain/asdot) to plain numbers
                    bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                        bgp_peer_details['local_as'])
                    bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                        bgp_peer_details['remote_as'])
                    if key == 'cluster':
                        # a configured cluster-id implies a route reflector client
                        bgp_peer_details['route_reflector_client'] = True
                        # we do not want cluster in the output
                        del bgp_peer_details['cluster']
                if 'cluster' in bgp_config[bgp_group_name].keys():
                    # cluster-id at group level also marks the peers as RR clients
                    bgp_peer_details['route_reflector_client'] = True
                prefix_limit_fields = {}
                for elem in bgp_group_details:
                    if '_prefix_limit' in elem[0] and elem[1] is not None:
                        datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                        default = _DATATYPE_DEFAULT_.get(datatype)
                        prefix_limit_fields.update({
                            elem[0].replace('_prefix_limit', ''):
                                napalm_base.helpers.convert(datatype, elem[1], default)
                        })
                bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
                bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
                if neighbor and bgp_peer_address == neighbor_ip:
                    break  # found the desired neighbor
            if 'cluster' in bgp_config[bgp_group_name].keys():
                # we do not want cluster in the output
                del bgp_config[bgp_group_name]['cluster']
        return bgp_config
    def get_bgp_neighbors_detail(self, neighbor_address=''):
        """
        Detailed view of the BGP neighbors operational data.

        :param neighbor_address: optional filter; restrict the output to
            this peer only.
        :return: dict keyed by routing instance name ('global' for master),
            then by remote AS, each holding a list of neighbor detail dicts.
        """
        bgp_neighbors = {}
        # template filled in for every neighbor, so missing XML fields
        # still produce a complete, predictable structure
        default_neighbor_details = {
            'up': False,
            'local_as': 0,
            'remote_as': 0,
            'router_id': u'',
            'local_address': u'',
            'routing_table': u'',
            'local_address_configured': False,
            'local_port': 0,
            'remote_address': u'',
            'remote_port': 0,
            'multihop': False,
            'multipath': False,
            'remove_private_as': False,
            'import_policy': u'',
            'export_policy': u'',
            'input_messages': -1,
            'output_messages': -1,
            'input_updates': -1,
            'output_updates': -1,
            'messages_queued_out': -1,
            'connection_state': u'',
            'previous_connection_state': u'',
            'last_event': u'',
            'suppress_4byte_as': False,
            'local_as_prepend': False,
            'holdtime': 0,
            'configured_holdtime': 0,
            'keepalive': 0,
            'configured_keepalive': 0,
            'active_prefix_count': -1,
            'received_prefix_count': -1,
            'accepted_prefix_count': -1,
            'suppressed_prefix_count': -1,
            'advertised_prefix_count': -1,
            'flap_count': 0
        }
        # map Junos BGP option tokens to the boolean keys of the output
        OPTION_KEY_MAP = {
            'RemovePrivateAS': 'remove_private_as',
            'Multipath': 'multipath',
            'Multihop': 'multihop',
            'AddressFamily': 'local_address_configured'
            # 'AuthKey' : 'authentication_key_set'
            # but other vendors do not specify if auth key is set
            # other options:
            # Preference, HoldTime, Ttl, LogUpDown, Refresh
        }

        def _bgp_iter_core(neighbor_data, instance=None):
            '''
            Iterate over a list of neighbors.
            For older junos, the routing instance is not specified inside the
            BGP neighbors XML, therefore we need to use a super sub-optimal structure
            as in get_bgp_neighbors: iterate through the list of network instances
            then execute one request for each and every routing instance.
            For newer junos, this is not necessary as the routing instance is available
            and we can get everything solve in a single request.
            '''
            for bgp_neighbor in neighbor_data:
                remote_as = int(bgp_neighbor[0])
                neighbor_details = deepcopy(default_neighbor_details)
                neighbor_details.update(
                    {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
                )
                if not instance:
                    # on newer Junos the forwarding RTI names the instance
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                    instance = peer_fwd_rti
                else:
                    # instance was explicitly requested; discard the field
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
                instance_name = 'global' if instance == 'master' else instance
                options = neighbor_details.pop('options', '')
                if isinstance(options, str):
                    options_list = options.split()
                    for option in options_list:
                        key = OPTION_KEY_MAP.get(option)
                        if key is not None:
                            neighbor_details[key] = True
                four_byte_as = neighbor_details.pop('4byte_as', 0)
                # addresses come as 'ip+port'; split and default port to 179
                local_address = neighbor_details.pop('local_address', '')
                local_details = local_address.split('+')
                neighbor_details['local_address'] = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, local_details[0], local_details[0])
                if len(local_details) == 2:
                    neighbor_details['local_port'] = int(local_details[1])
                else:
                    neighbor_details['local_port'] = 179
                neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
                peer_address = neighbor_details.pop('peer_address', '')
                remote_details = peer_address.split('+')
                neighbor_details['remote_address'] = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, remote_details[0], remote_details[0])
                if len(remote_details) == 2:
                    neighbor_details['remote_port'] = int(remote_details[1])
                else:
                    neighbor_details['remote_port'] = 179
                neighbor_details['routing_table'] = instance_name
                neighbor_details['local_as'] = napalm_base.helpers.as_number(
                    neighbor_details['local_as'])
                neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                    neighbor_details['remote_as'])
                neighbors_rib = neighbor_details.pop('rib')
                neighbors_queue = neighbor_details.pop('queue')
                messages_queued_out = 0
                # total queued messages across all queues for this peer
                for queue_entry in neighbors_queue.items():
                    messages_queued_out += queue_entry[1][0][1]
                neighbor_details['messages_queued_out'] = messages_queued_out
                if instance_name not in bgp_neighbors.keys():
                    bgp_neighbors[instance_name] = {}
                if remote_as not in bgp_neighbors[instance_name].keys():
                    bgp_neighbors[instance_name][remote_as] = []
                neighbor_rib_stats = neighbors_rib.items()
                if not neighbor_rib_stats:
                    bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                    continue  # no RIBs available, pass default details
                neighbor_rib_details = {
                    'active_prefix_count': 0,
                    'received_prefix_count': 0,
                    'accepted_prefix_count': 0,
                    'suppressed_prefix_count': 0,
                    'advertised_prefix_count': 0
                }
                # sum prefix counters across all RIBs of the peer
                for rib_entry in neighbor_rib_stats:
                    for elem in rib_entry[1]:
                        if elem[1] is None:
                            neighbor_rib_details[elem[0]] += 0
                        else:
                            neighbor_rib_details[elem[0]] += elem[1]
                neighbor_details.update(neighbor_rib_details)
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)

        # old_junos = napalm_base.helpers.convert(
        #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
        bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)

        # if old_junos:
        # one RPC per routing instance (see _bgp_iter_core docstring)
        instances = junos_views.junos_route_instance_table(self.device)
        for instance, instance_data in instances.get().items():
            if instance.startswith('__'):
                # junos internal instances
                continue
            neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                    neighbor_address=str(neighbor_address)).items()
            _bgp_iter_core(neighbor_data, instance=instance)
        # else:
        #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
        #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
        #     _bgp_iter_core(neighbor_data)
        return bgp_neighbors
def get_arp_table(self):
"""Return the ARP table."""
# could use ArpTable
# from jnpr.junos.op.phyport import ArpTable
# and simply use it
# but
# we need:
# - filters
# - group by VLAN ID
# - hostname & TTE fields as well
arp_table = []
arp_table_raw = junos_views.junos_arp_table(self.device)
arp_table_raw.get()
arp_table_items = arp_table_raw.items()
for arp_table_entry in arp_table_items:
arp_entry = {
elem[0]: elem[1] for elem in arp_table_entry[1]
}
arp_entry['mac'] = napalm_base.helpers.mac(arp_entry.get('mac'))
arp_entry['ip'] = napalm_base.helpers.ip(arp_entry.get('ip'))
arp_table.append(arp_entry)
return arp_table
def get_ntp_servers(self):
"""Return the NTP servers configured on the device."""
ntp_table = junos_views.junos_ntp_servers_config_table(self.device)
ntp_table.get()
ntp_servers = ntp_table.items()
if not ntp_servers:
return {}
return {napalm_base.helpers.ip(server[0]): {} for server in ntp_servers}
def get_ntp_stats(self):
"""Return NTP stats (associations)."""
# NTP Peers does not have XML RPC defined
# thus we need to retrieve raw text and parse...
# :(
ntp_stats = []
REGEX = (
'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
'\s+([0-9\.]+)\s?$'
)
ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
ntp_assoc_output_lines = ntp_assoc_output.splitlines()
for ntp_assoc_output_line in ntp_assoc_output_lines[3:]: # except last line
line_search = re.search(REGEX, ntp_assoc_output_line, re.I)
if not line_search:
continue # pattern not found
line_groups = line_search.groups()
try:
ntp_stats.append({
'remote': napalm_base.helpers.ip(line_groups[1]),
'synchronized': (line_groups[0] == '*'),
'referenceid': py23_compat.text_type(line_groups[2]),
'stratum': int(line_groups[3]),
'type': py23_compat.text_type(line_groups[4]),
'when': py23_compat.text_type(line_groups[5]),
'hostpoll': int(line_groups[6]),
'reachability': int(line_groups[7]),
'delay': float(line_groups[8]),
'offset': float(line_groups[9]),
'jitter': float(line_groups[10])
})
except Exception:
continue # jump to next line
return ntp_stats
def get_interfaces_ip(self):
"""Return the configured IP addresses."""
interfaces_ip = {}
interface_table = junos_views.junos_ip_interfaces_table(self.device)
interface_table.get()
interface_table_items = interface_table.items()
_FAMILY_VMAP_ = {
'inet': u'ipv4',
'inet6': u'ipv6'
# can add more mappings
}
_FAMILY_MAX_PREFIXLEN = {
'inet': 32,
'inet6': 128
}
for interface_details in interface_table_items:
ip_network = interface_details[0]
ip_address = ip_network.split('/')[0]
address = napalm_base.helpers.convert(
napalm_base.helpers.ip, ip_address, ip_address)
try:
interface_details_dict = dict(interface_details[1])
family_raw = interface_details_dict.get('family')
interface = py23_compat.text_type(interface_details_dict.get('interface'))
except ValueError:
continue
prefix = napalm_base.helpers.convert(int,
ip_network.split('/')[-1],
_FAMILY_MAX_PREFIXLEN.get(family_raw))
family = _FAMILY_VMAP_.get(family_raw)
if not family or not interface:
continue
if interface not in interfaces_ip.keys():
interfaces_ip[interface] = {}
if family not in interfaces_ip[interface].keys():
interfaces_ip[interface][family] = {}
if address not in interfaces_ip[interface][family].keys():
interfaces_ip[interface][family][address] = {}
interfaces_ip[interface][family][address][u'prefix_length'] = prefix
return interfaces_ip
def get_mac_address_table(self):
"""Return the MAC address table."""
mac_address_table = []
if self.device.facts.get('personality', '') in ['SWITCH']: # for EX & QFX devices
if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']: # for L2NG devices
mac_table = junos_views.junos_mac_address_table_switch_l2ng(self.device)
else:
mac_table = junos_views.junos_mac_address_table_switch(self.device)
else:
mac_table = junos_views.junos_mac_address_table(self.device)
mac_table.get()
mac_table_items = mac_table.items()
default_values = {
'mac': u'',
'interface': u'',
'vlan': 0,
'static': False,
'active': True,
'moves': 0,
'last_move': 0.0
}
for mac_table_entry in mac_table_items:
mac_entry = default_values.copy()
mac_entry.update(
{elem[0]: elem[1] for elem in mac_table_entry[1]}
)
mac = mac_entry.get('mac')
# JUNOS returns '*' for Type = Flood
if mac == '*':
continue
mac_entry['mac'] = napalm_base.helpers.mac(mac)
mac_address_table.append(mac_entry)
return mac_address_table
def get_route_to(self, destination='', protocol=''):
"""Return route details to a specific destination, learned from a certain protocol."""
routes = {}
if not isinstance(destination, py23_compat.string_types):
raise TypeError('Please specify a valid destination!')
if protocol and isinstance(destination, py23_compat.string_types):
protocol = protocol.lower()
if protocol == 'connected':
protocol = 'direct' # this is how is called on JunOS
_COMMON_PROTOCOL_FIELDS_ = [
'destination',
'prefix_length',
'protocol',
'current_active',
'last_active',
'age',
'next_hop',
'outgoing_interface',
'selected_next_hop',
'preference',
'inactive_reason',
'routing_table'
] # identifies the list of fileds common for all protocols
_BOOLEAN_FIELDS_ = [
'current_active',
'selected_next_hop',
'last_active'
] # fields expected to have boolean values
_PROTOCOL_SPECIFIC_FIELDS_ = {
'bgp': [
'local_as',
'remote_as',
'as_path',
'communities',
'local_preference',
'preference2',
'remote_address',
'metric',
'metric2'
],
'isis': [
'level',
'metric',
'local_as'
]
}
routes_table = junos_views.junos_protocol_route_table(self.device)
rt_kargs = {
'destination': destination
}
if protocol and isinstance(destination, py23_compat.string_types):
rt_kargs['protocol'] = protocol
try:
routes_table.get(**rt_kargs)
except RpcTimeoutError:
# on devices with milions of routes
# in case the destination is too generic (e.g.: 10/8)
# will take very very long to determine all routes and
# moreover will return a huge list
raise CommandTimeoutException(
'Too many routes returned! Please try with a longer prefix or a specific protocol!'
)
except RpcError as rpce:
if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
raise CommandErrorException(
'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
raise CommandErrorException(rpce)
except Exception as err:
raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))
routes_items = routes_table.items()
for route in routes_items:
d = {}
# next_hop = route[0]
d = {elem[0]: elem[1] for elem in route[1]}
destination = napalm_base.helpers.ip(d.pop('destination', ''))
prefix_length = d.pop('prefix_length', 32)
destination = '{d}/{p}'.format(
d=destination,
p=prefix_length
)
d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
as_path = d.get('as_path')
if as_path is not None:
d['as_path'] = as_path.split(' I ')[0]\
.replace('AS path:', '')\
.replace('I', '')\
.strip()
# to be sure that contains only AS Numbers
if d.get('inactive_reason') is None:
d['inactive_reason'] = u''
route_protocol = d.get('protocol').lower()
if protocol and protocol != route_protocol:
continue
communities = d.get('communities')
if communities is not None and type(communities) is not list:
d['communities'] = [communities]
d_keys = list(d.keys())
# fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
all_protocol_attributes = {
key: d.pop(key)
for key in d_keys
if key not in _COMMON_PROTOCOL_FIELDS_
}
protocol_attributes = {
key: value for key, value in all_protocol_attributes.items()
if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
}
d['protocol_attributes'] = protocol_attributes
if destination not in routes.keys():
routes[destination] = []
routes[destination].append(d)
return routes
def get_snmp_information(self):
"""Return the SNMP configuration."""
snmp_information = {}
snmp_config = junos_views.junos_snmp_config_table(self.device)
snmp_config.get()
snmp_items = snmp_config.items()
if not snmp_items:
return snmp_information
snmp_information = {
py23_compat.text_type(ele[0]): ele[1] if ele[1] else ''
for ele in snmp_items[0][1]
}
snmp_information['community'] = {}
communities_table = snmp_information.pop('communities_table')
if not communities_table:
return snmp_information
for community in communities_table.items():
community_name = py23_compat.text_type(community[0])
community_details = {
'acl': ''
}
community_details.update({
py23_compat.text_type(ele[0]): py23_compat.text_type(
ele[1] if ele[0] != 'mode'
else C.SNMP_AUTHORIZATION_MODE_MAP.get(ele[1]))
for ele in community[1]
})
snmp_information['community'][community_name] = community_details
return snmp_information
def get_probes_config(self):
"""Return the configuration of the RPM probes."""
probes = {}
probes_table = junos_views.junos_rpm_probes_config_table(self.device)
probes_table.get()
probes_table_items = probes_table.items()
for probe_test in probes_table_items:
test_name = py23_compat.text_type(probe_test[0])
test_details = {
p[0]: p[1] for p in probe_test[1]
}
probe_name = napalm_base.helpers.convert(
py23_compat.text_type, test_details.pop('probe_name'))
target = napalm_base.helpers.convert(
py23_compat.text_type, test_details.pop('target', ''))
test_interval = napalm_base.helpers.convert(int, test_details.pop('test_interval', '0'))
probe_count = napalm_base.helpers.convert(int, test_details.pop('probe_count', '0'))
probe_type = napalm_base.helpers.convert(
py23_compat.text_type, test_details.pop('probe_type', ''))
source = napalm_base.helpers.convert(
py23_compat.text_type, test_details.pop('source_address', ''))
if probe_name not in probes.keys():
probes[probe_name] = {}
probes[probe_name][test_name] = {
'probe_type': probe_type,
'target': target,
'source': source,
'probe_count': probe_count,
'test_interval': test_interval
}
return probes
def get_probes_results(self):
"""Return the results of the RPM probes."""
probes_results = {}
probes_results_table = junos_views.junos_rpm_probes_results_table(self.device)
probes_results_table.get()
probes_results_items = probes_results_table.items()
for probe_result in probes_results_items:
probe_name = py23_compat.text_type(probe_result[0])
test_results = {
p[0]: p[1] for p in probe_result[1]
}
test_results['last_test_loss'] = napalm_base.helpers.convert(
int, test_results.pop('last_test_loss'), 0)
for test_param_name, test_param_value in test_results.items():
if isinstance(test_param_value, float):
test_results[test_param_name] = test_param_value * 1e-3
# convert from useconds to mseconds
test_name = test_results.pop('test_name', '')
source = test_results.get('source', u'')
if source is None:
test_results['source'] = u''
if probe_name not in probes_results.keys():
probes_results[probe_name] = {}
probes_results[probe_name][test_name] = test_results
return probes_results
    def traceroute(self,
                   destination,
                   source=C.TRACEROUTE_SOURCE,
                   ttl=C.TRACEROUTE_TTL,
                   timeout=C.TRACEROUTE_TIMEOUT,
                   vrf=C.TRACEROUTE_VRF):
        """
        Execute traceroute and return results.

        :param destination: host or IP to trace towards.
        :param source: optional source address for the probes.
        :param ttl: optional max TTL (hop count).
        :param timeout: optional per-probe wait, in seconds.
        :param vrf: optional routing-instance to run the traceroute in.
        :return: on success, {'success': {ttl: {'probes': {...}}}};
            on failure, {'error': '<message>'}.
        """
        traceroute_result = {}
        # calling form RPC does not work properly :(
        # but defined junos_route_instance_table just in case
        # build the CLI command piece by piece from the optional arguments
        source_str = ''
        maxttl_str = ''
        wait_str = ''
        vrf_str = ''
        if source:
            source_str = ' source {source}'.format(source=source)
        if ttl:
            maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
        if timeout:
            wait_str = ' wait {timeout}'.format(timeout=timeout)
        if vrf:
            vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
        traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
            destination=destination,
            source=source_str,
            maxttl=maxttl_str,
            wait=wait_str,
            vrf=vrf_str
        )
        traceroute_rpc = E('command', traceroute_command)
        # NOTE: reaches into the private _NCElement__doc attribute to get
        # the raw XML reply of the direct NETCONF RPC call
        rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
        # make direct RPC call via NETCONF
        traceroute_results = rpc_reply.find('.//traceroute-results')
        traceroute_failure = napalm_base.helpers.find_txt(
            traceroute_results, 'traceroute-failure', '')
        error_message = napalm_base.helpers.find_txt(
            traceroute_results, 'rpc-error/error-message', '')
        if traceroute_failure and error_message:
            return {'error': '{}: {}'.format(traceroute_failure, error_message)}
        traceroute_result['success'] = {}
        for hop in traceroute_results.findall('hop'):
            ttl_value = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
            if ttl_value not in traceroute_result['success']:
                traceroute_result['success'][ttl_value] = {'probes': {}}
            for probe in hop.findall('probe-result'):
                probe_index = napalm_base.helpers.convert(
                    int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
                ip_address = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
                host_name = py23_compat.text_type(
                    napalm_base.helpers.find_txt(probe, 'host-name', '*'))
                rtt = napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3  # ms
                traceroute_result['success'][ttl_value]['probes'][probe_index] = {
                    'ip_address': ip_address,
                    'host_name': host_name,
                    'rtt': rtt
                }
        return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
ping_dict = {}
source_str = ''
maxttl_str = ''
timeout_str = ''
size_str = ''
count_str = ''
vrf_str = ''
if source:
source_str = ' source {source}'.format(source=source)
if ttl:
maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
if timeout:
timeout_str = ' wait {timeout}'.format(timeout=timeout)
if size:
size_str = ' size {size}'.format(size=size)
if count:
count_str = ' count {count}'.format(count=count)
if vrf:
vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
destination=destination,
source=source_str,
ttl=maxttl_str,
timeout=timeout_str,
size=size_str,
count=count_str,
vrf=vrf_str
)
ping_rpc = E('command', ping_command)
rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
# make direct RPC call via NETCONF
probe_summary = rpc_reply.find('.//probe-results-summary')
if probe_summary is None:
rpc_error = rpc_reply.find('.//rpc-error')
return {'error': '{}'.format(
napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
packet_loss = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
# rtt values are valid only if a we get an ICMP reply
if packet_loss is not 100:
ping_dict['success'] = {}
ping_dict['success']['probes_sent'] = int(
probe_summary.findtext("probes-sent"))
ping_dict['success']['packet_loss'] = packet_loss
ping_dict['success'].update({
'rtt_min': round((napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
'rtt_max': round((napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
'rtt_avg': round((napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_summary, 'rtt-average'), -1) * 1e-3), 3),
'rtt_stddev': round((napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
})
tmp = rpc_reply.find('.//ping-results')
results_array = []
for probe_result in tmp.findall('probe-result'):
ip_address = napalm_base.helpers.convert(
napalm_base.helpers.ip,
napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
rtt = round(
(napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_result, 'rtt'), -1) * 1e-3), 3)
results_array.append({'ip_address': ip_address,
'rtt': rtt})
ping_dict['success'].update({'results': results_array})
else:
return {'error': 'Packet loss {}'.format(packet_loss)}
return ping_dict
def get_users(self):
"""Return the configuration of the users."""
users = {}
_JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = {
'super-user': 15,
'superuser': 15,
'operator': 5,
'read-only': 1,
'unauthorized': 0
}
_DEFAULT_USER_DETAILS = {
'level': 0,
'password': '',
'sshkeys': []
}
users_table = junos_views.junos_users_table(self.device)
users_table.get()
users_items = users_table.items()
for user_entry in users_items:
username = user_entry[0]
user_details = _DEFAULT_USER_DETAILS.copy()
user_details.update({
d[0]: d[1] for d in user_entry[1] if d[1]
})
user_class = user_details.pop('class', '')
user_details = {
key: py23_compat.text_type(user_details[key])
for key in user_details.keys()
}
level = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0)
user_details.update({
'level': level
})
user_details['sshkeys'] = [
user_details.pop(key)
for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
if user_details.get(key, '')
]
users[username] = user_details
return users
    def get_optics(self):
        """Return optics (transceiver) diagnostics per physical channel.

        The result maps interface name to
        ``{'physical_channels': {'channel': [...]}}`` where each channel entry
        carries the instantaneous input/output power and laser bias current.
        The avg/min/max fields are always 0.0 because the device does not
        report them.
        """
        optics_table = junos_views.junos_intf_optics_table(self.device)
        optics_table.get()
        optics_items = optics_table.items()
        # optics_items has no lane information, so we need to re-format data
        # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
        # but the information for 40G/100G is incorrect at this point
        # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
        optics_items_with_lane = []
        for intf_optic_item in optics_items:
            temp_list = list(intf_optic_item)
            temp_list.insert(1, u"0")
            new_intf_optic_item = tuple(temp_list)
            optics_items_with_lane.append(new_intf_optic_item)
        # Now optics_items_with_lane has all optics with lane 0 included
        # Example: ('xe-0/0/0', u'0', [ optical_values ])
        # Get optical information for 40G/100G optics
        optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
        optics_table40G.get()
        optics_40Gitems = optics_table40G.items()
        # Re-format data as before inserting lane value
        new_optics_40Gitems = []
        for item in optics_40Gitems:
            lane = item[0]
            # the first field pair holds the interface name; pop it so only
            # the optical value pairs remain in item[1]
            iface = item[1].pop(0)
            new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
        # New_optics_40Gitems contains 40G/100G optics only:
        # ('et-0/0/49', u'0', [ optical_values ]),
        # ('et-0/0/49', u'1', [ optical_values ]),
        # ('et-0/0/49', u'2', [ optical_values ])
        # Remove 40G/100G optics entries with wrong information returned
        # from junos_intf_optics_table()
        iface_40G = [item[0] for item in new_optics_40Gitems]
        for intf_optic_item in optics_items_with_lane:
            iface_name = intf_optic_item[0]
            if iface_name not in iface_40G:
                new_optics_40Gitems.append(intf_optic_item)
        # New_optics_40Gitems contains all optics 10G/40G/100G with the lane
        optics_detail = {}
        for intf_optic_item in new_optics_40Gitems:
            lane = intf_optic_item[1]
            interface_name = py23_compat.text_type(intf_optic_item[0])
            optics = dict(intf_optic_item[2])
            if interface_name not in optics_detail:
                optics_detail[interface_name] = {}
                optics_detail[interface_name]['physical_channels'] = {}
                optics_detail[interface_name]['physical_channels']['channel'] = []
            # Defaulting avg, min, max values to 0.0 since device does not
            # return these values
            intf_optics = {
                'index': int(lane),
                'state': {
                    'input_power': {
                        'instant': (
                            float(optics['input_power'])
                            if optics['input_power'] not in
                            [None, C.OPTICS_NULL_LEVEL]
                            else 0.0),
                        'avg': 0.0,
                        'max': 0.0,
                        'min': 0.0
                    },
                    'output_power': {
                        'instant': (
                            float(optics['output_power'])
                            if optics['output_power'] not in
                            [None, C.OPTICS_NULL_LEVEL]
                            else 0.0),
                        'avg': 0.0,
                        'max': 0.0,
                        'min': 0.0
                    },
                    'laser_bias_current': {
                        'instant': (
                            float(optics['laser_bias_current'])
                            if optics['laser_bias_current'] not in
                            [None, C.OPTICS_NULL_LEVEL]
                            else 0.0),
                        'avg': 0.0,
                        'max': 0.0,
                        'min': 0.0
                    }
                }
            }
            optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
        return optics_detail
def get_config(self, retrieve='all'):
rv = {
'startup': '',
'running': '',
'candidate': ''
}
options = {
'format': 'text',
'database': 'candidate'
}
if retrieve in ('candidate', 'all'):
config = self.device.rpc.get_config(filter_xml=None, options=options)
rv['candidate'] = py23_compat.text_type(config.text)
if retrieve in ('running', 'all'):
options['database'] = 'committed'
config = self.device.rpc.get_config(filter_xml=None, options=options)
rv['running'] = py23_compat.text_type(config.text)
return rv
    def get_network_instances(self, name=''):
        """Return routing instances (VRFs) and their member interfaces.

        :param name: optional instance name; when given, only that instance is
            returned (empty dict when the name is unknown).
        :returns: dict keyed by instance name with 'name', 'type', 'state'
            (route distinguisher) and 'interfaces' per instance. Interfaces not
            bound to any configured instance are grouped under 'default'.
        """
        network_instances = {}
        ri_table = junos_views.junos_nw_instances_table(self.device)
        ri_table.get()
        ri_entries = ri_table.items()
        vrf_interfaces = []
        for ri_entry in ri_entries:
            ri_name = py23_compat.text_type(ri_entry[0])
            ri_details = {
                d[0]: d[1] for d in ri_entry[1]
            }
            ri_type = ri_details['instance_type']
            if ri_type is None:
                # no explicit instance-type configured
                ri_type = 'default'
            ri_rd = ri_details['route_distinguisher']
            ri_interfaces = ri_details['interfaces']
            # a single interface comes back as a scalar, not a list
            if not isinstance(ri_interfaces, list):
                ri_interfaces = [ri_interfaces]
            network_instances[ri_name] = {
                'name': ri_name,
                'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
                'state': {
                    'route_distinguisher': ri_rd if ri_rd else ''
                },
                'interfaces': {
                    'interface': {
                        intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                    }
                }
            }
            vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
        # everything not claimed by a VRF belongs to the default instance
        all_interfaces = self.get_interfaces().keys()
        default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
        if 'default' not in network_instances:
            network_instances['default'] = {
                'name': 'default',
                'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
                'state': {
                    'route_distinguisher': ''
                },
                'interfaces': {
                    'interface': {
                        py23_compat.text_type(intrf_name): {}
                        for intrf_name in default_interfaces
                    }
                }
            }
        if not name:
            return network_instances
        if name not in network_instances:
            return {}
        return {name: network_instances[name]}
|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver.get_ntp_servers | python | def get_ntp_servers(self):
ntp_table = junos_views.junos_ntp_servers_config_table(self.device)
ntp_table.get()
ntp_servers = ntp_table.items()
if not ntp_servers:
return {}
return {napalm_base.helpers.ip(server[0]): {} for server in ntp_servers} | Return the NTP servers configured on the device. | train | https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L1237-L1247 | null | class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
"""
Initialise JunOS driver.
Optional args:
* config_lock (True/False): lock configuration DB after the connection is established.
* port (int): custom port
* key_file (string): SSH key file path
* keepalive (int): Keepalive interval
* ignore_warning (boolean): not generate warning exceptions
"""
self.hostname = hostname
self.username = username
self.password = password
self.timeout = timeout
self.config_replace = False
self.locked = False
# Get optional arguments
if optional_args is None:
optional_args = {}
self.config_lock = optional_args.get('config_lock', False)
self.port = optional_args.get('port', 22)
self.key_file = optional_args.get('key_file', None)
self.keepalive = optional_args.get('keepalive', 30)
self.ssh_config_file = optional_args.get('ssh_config_file', None)
self.ignore_warning = optional_args.get('ignore_warning', False)
if self.key_file:
self.device = Device(hostname,
user=username,
password=password,
ssh_private_key_file=self.key_file,
ssh_config=self.ssh_config_file,
port=self.port)
else:
self.device = Device(hostname,
user=username,
password=password,
port=self.port,
ssh_config=self.ssh_config_file)
self.profile = ["junos"]
    def open(self):
        """Open the connection with the device."""
        try:
            self.device.open()
        except ConnectTimeoutError as cte:
            # re-raise as the NAPALM-level exception type
            raise ConnectionException(cte.message)
        self.device.timeout = self.timeout
        # configure the SSH transport keepalive interval (seconds)
        self.device._conn._session.transport.set_keepalive(self.keepalive)
        if hasattr(self.device, "cu"):
            # make sure to remove the cu attr from previous session
            # ValueError: requested attribute name cu already exists
            del self.device.cu
        # bind the PyEZ configuration utility as device.cu
        self.device.bind(cu=Config)
        if self.config_lock:
            self._lock()
def close(self):
"""Close the connection."""
if self.config_lock:
self._unlock()
self.device.close()
def _lock(self):
"""Lock the config DB."""
if not self.locked:
self.device.cu.lock()
self.locked = True
def _unlock(self):
"""Unlock the config DB."""
if self.locked:
self.device.cu.unlock()
self.locked = False
def _rpc(self, get, child=None, **kwargs):
"""
This allows you to construct an arbitrary RPC call to retreive common stuff. For example:
Configuration: get: "<get-configuration/>"
Interface information: get: "<get-interface-information/>"
A particular interfacece information:
get: "<get-interface-information/>"
child: "<interface-name>ge-0/0/0</interface-name>"
"""
rpc = etree.fromstring(get)
if child:
rpc.append(etree.fromstring(child))
response = self.device.execute(rpc)
return etree.tostring(response)
def is_alive(self):
# evaluate the state of the underlying SSH connection
# and also the NETCONF status from PyEZ
return {
'is_alive': self.device._conn._session.transport.is_active() and self.device.connected
}
@staticmethod
def _is_json_format(config):
try:
_ = json.loads(config) # noqa
except (TypeError, ValueError):
return False
return True
def _detect_config_format(self, config):
fmt = 'text'
set_action_matches = [
'set',
'activate',
'deactivate',
'annotate',
'copy',
'delete',
'insert',
'protect',
'rename',
'unprotect',
]
if config.strip().startswith('<'):
return 'xml'
elif config.strip().split(' ')[0] in set_action_matches:
return 'set'
elif self._is_json_format(config):
return 'json'
return fmt
    def _load_candidate(self, filename, config, overwrite):
        """Load a candidate configuration from *filename* or the *config* string.

        The payload format (xml/set/json/text) is auto-detected; *overwrite*
        selects replace vs merge semantics. Raises ReplaceConfigException or
        MergeConfigException on load errors, depending on the pending operation.
        """
        if filename is None:
            configuration = config
        else:
            with open(filename) as f:
                configuration = f.read()
        if not self.config_lock:
            # if not locked during connection time
            # will try to lock it if not already acquired
            self._lock()
            # and the device will be locked till first commit/rollback
        try:
            fmt = self._detect_config_format(configuration)
            if fmt == "xml":
                # PyEZ expects an lxml element for XML payloads
                configuration = etree.XML(configuration)
            self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                                ignore_warning=self.ignore_warning)
        except ConfigLoadError as e:
            # surface the error with the exception type matching the
            # pending operation (replace vs merge)
            if self.config_replace:
                raise ReplaceConfigException(e.errs)
            else:
                raise MergeConfigException(e.errs)
def load_replace_candidate(self, filename=None, config=None):
"""Open the candidate config and merge."""
self.config_replace = True
self._load_candidate(filename, config, True)
def load_merge_candidate(self, filename=None, config=None):
"""Open the candidate config and replace."""
self.config_replace = False
self._load_candidate(filename, config, False)
def compare_config(self):
"""Compare candidate config with running."""
diff = self.device.cu.diff()
if diff is None:
return ''
else:
return diff.strip()
def commit_config(self):
"""Commit configuration."""
self.device.cu.commit(ignore_warning=self.ignore_warning)
if not self.config_lock:
self._unlock()
def discard_config(self):
"""Discard changes (rollback 0)."""
self.device.cu.rollback(rb_id=0)
if not self.config_lock:
self._unlock()
def rollback(self):
"""Rollback to previous commit."""
self.device.cu.rollback(rb_id=1)
self.commit_config()
def get_facts(self):
"""Return facts of the device."""
output = self.device.facts
uptime = self.device.uptime or -1
interfaces = junos_views.junos_iface_table(self.device)
interfaces.get()
interface_list = interfaces.keys()
return {
'vendor': u'Juniper',
'model': py23_compat.text_type(output['model']),
'serial_number': py23_compat.text_type(output['serialnumber']),
'os_version': py23_compat.text_type(output['version']),
'hostname': py23_compat.text_type(output['hostname']),
'fqdn': py23_compat.text_type(output['fqdn']),
'uptime': uptime,
'interface_list': interface_list
}
def get_interfaces(self):
"""Return interfaces details."""
result = {}
interfaces = junos_views.junos_iface_table(self.device)
interfaces.get()
# convert all the tuples to our pre-defined dict structure
for iface in interfaces.keys():
result[iface] = {
'is_up': interfaces[iface]['is_up'],
'is_enabled': interfaces[iface]['is_enabled'],
'description': (interfaces[iface]['description'] or u''),
'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
'mac_address': napalm_base.helpers.convert(
napalm_base.helpers.mac,
interfaces[iface]['mac_address'],
py23_compat.text_type(interfaces[iface]['mac_address'])),
'speed': -1
}
# result[iface]['last_flapped'] = float(result[iface]['last_flapped'])
match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
if match is None:
continue
speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
if speed_value == -1:
continue
speed_unit = match.group(2)
if speed_unit.lower() == 'gbps':
speed_value *= 1000
result[iface]['speed'] = speed_value
return result
def get_interfaces_counters(self):
"""Return interfaces counters."""
query = junos_views.junos_iface_counter_table(self.device)
query.get()
interface_counters = {}
for interface, counters in query.items():
interface_counters[interface] = {k: v if v is not None else -1 for k, v in counters}
return interface_counters
    def get_environment(self):
        """Return environment details: power, fans, temperature, CPU and memory.

        Sensors are parsed from the environment table; class labels are only
        present on the first row of each class, so the last seen class is
        carried forward for unlabeled rows.
        """
        environment = junos_views.junos_enviroment_table(self.device)
        routing_engine = junos_views.junos_routing_engine_table(self.device)
        temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
        power_supplies = junos_views.junos_pem_table(self.device)
        environment.get()
        routing_engine.get()
        temperature_thresholds.get()
        environment_data = {}
        current_class = None
        for sensor_object, object_data in environment.items():
            structured_object_data = {k: v for k, v in object_data}
            if structured_object_data['class']:
                # If current object has a 'class' defined, store it for use
                # on subsequent unlabeled lines.
                current_class = structured_object_data['class']
            else:
                # Juniper doesn't label the 2nd+ lines of a given class with a
                # class name. In that case, we use the most recent class seen.
                structured_object_data['class'] = current_class
            if structured_object_data['class'] == 'Power':
                # Create a dict for the 'power' key
                try:
                    environment_data['power'][sensor_object] = {}
                except KeyError:
                    environment_data['power'] = {}
                    environment_data['power'][sensor_object] = {}
                # placeholders; corrected from the PEM table below when available
                environment_data['power'][sensor_object]['capacity'] = -1.0
                environment_data['power'][sensor_object]['output'] = -1.0
            if structured_object_data['class'] == 'Fans':
                # Create a dict for the 'fans' key
                try:
                    environment_data['fans'][sensor_object] = {}
                except KeyError:
                    environment_data['fans'] = {}
                    environment_data['fans'][sensor_object] = {}
            status = structured_object_data['status']
            env_class = structured_object_data['class']
            if (status == 'OK' and env_class == 'Power'):
                # If status is Failed, Absent or Testing, set status to False.
                environment_data['power'][sensor_object]['status'] = True
            elif (status != 'OK' and env_class == 'Power'):
                environment_data['power'][sensor_object]['status'] = False
            elif (status == 'OK' and env_class == 'Fans'):
                # If status is Failed, Absent or Testing, set status to False.
                environment_data['fans'][sensor_object]['status'] = True
            elif (status != 'OK' and env_class == 'Fans'):
                environment_data['fans'][sensor_object]['status'] = False
            # NOTE(review): this inner loop filters on the *outer* sensor's
            # class, not on the threshold row itself — verify intent
            for temperature_object, temperature_data in temperature_thresholds.items():
                structured_temperature_data = {k: v for k, v in temperature_data}
                if structured_object_data['class'] == 'Temp':
                    # Create a dict for the 'temperature' key
                    try:
                        environment_data['temperature'][sensor_object] = {}
                    except KeyError:
                        environment_data['temperature'] = {}
                        environment_data['temperature'][sensor_object] = {}
                    # Check we have a temperature field in this class (See #66)
                    if structured_object_data['temperature']:
                        environment_data['temperature'][sensor_object]['temperature'] = \
                            float(structured_object_data['temperature'])
                    # Set a default value (False) to the key is_critical and is_alert
                    environment_data['temperature'][sensor_object]['is_alert'] = False
                    environment_data['temperature'][sensor_object]['is_critical'] = False
                    # Check if the working temperature is equal to or higher than alerting threshold
                    # NOTE(review): `temp` may be None when the sensor reports
                    # no temperature; the comparisons below assume a number
                    temp = structured_object_data['temperature']
                    if structured_temperature_data['red-alarm'] <= temp:
                        environment_data['temperature'][sensor_object]['is_critical'] = True
                        environment_data['temperature'][sensor_object]['is_alert'] = True
                    elif structured_temperature_data['yellow-alarm'] <= temp:
                        environment_data['temperature'][sensor_object]['is_alert'] = True
        # Try to correct Power Supply information
        pem_table = dict()
        try:
            power_supplies.get()
        except RpcError:
            # Not all platforms have support for this
            pass
        else:
            # Format PEM information and correct capacity and output values
            for pem in power_supplies.items():
                pem_name = pem[0].replace("PEM", "Power Supply")
                pem_table[pem_name] = dict(pem[1])
                environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
                environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
        for routing_engine_object, routing_engine_data in routing_engine.items():
            structured_routing_engine_data = {k: v for k, v in routing_engine_data}
            # Create dicts for 'cpu' and 'memory'.
            try:
                environment_data['cpu'][routing_engine_object] = {}
                environment_data['memory'] = {}
            except KeyError:
                environment_data['cpu'] = {}
                environment_data['cpu'][routing_engine_object] = {}
                environment_data['memory'] = {}
            # Calculate the CPU usage by using the CPU idle value.
            environment_data['cpu'][routing_engine_object]['%usage'] = \
                100.0 - structured_routing_engine_data['cpu-idle']
            try:
                environment_data['memory']['available_ram'] = \
                    int(structured_routing_engine_data['memory-dram-size'])
            except ValueError:
                # strip unit suffixes such as "MB" before converting
                environment_data['memory']['available_ram'] = \
                    int(
                        ''.join(
                            i for i in structured_routing_engine_data['memory-dram-size']
                            if i.isdigit()
                        )
                    )
            # Junos gives us RAM in %, so calculation has to be made.
            # Sadly, because of this, results are not 100% accurate to the truth.
            environment_data['memory']['used_ram'] = \
                int(round(environment_data['memory']['available_ram'] / 100.0 *
                          structured_routing_engine_data['memory-buffer-utilization']))
        return environment_data
@staticmethod
def _get_address_family(table):
"""
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
"""
address_family_mapping = {
'inet': 'ipv4',
'inet6': 'ipv6',
'inetflow': 'flow'
}
family = table.split('.')[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = family
return address_family
    def _parse_route_stats(self, neighbor):
        """Build the per-address-family prefix statistics for a BGP neighbor.

        Returns a dict keyed by address family ('ipv4', 'ipv6', possibly
        others) with received/accepted/sent prefix counts; -1 placeholders
        are kept for families the session does not carry.

        NOTE: mutates ``neighbor['sent_prefixes']`` in place (converts an int
        to a list and pops entries while iterating the tables).
        """
        data = {
            'ipv4': {
                'received_prefixes': -1,
                'accepted_prefixes': -1,
                'sent_prefixes': -1
            },
            'ipv6': {
                'received_prefixes': -1,
                'accepted_prefixes': -1,
                'sent_prefixes': -1
            }
        }
        # a down session has no usable statistics
        if not neighbor['is_up']:
            return data
        elif isinstance(neighbor['tables'], list):
            if isinstance(neighbor['sent_prefixes'], int):
                # We expect sent_prefixes to be a list, but sometimes it
                # is of type int. Therefore convert attribute to list
                neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
            for idx, table in enumerate(neighbor['tables']):
                family = self._get_address_family(table)
                data[family] = {}
                data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
                data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
                # sent counts are only reported for tables that are in sync
                if 'in sync' in neighbor['send-state'][idx]:
                    data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
                else:
                    data[family]['sent_prefixes'] = 0
        else:
            # single table: the stats fields are scalars, not lists
            family = self._get_address_family(neighbor['tables'])
            data[family] = {}
            data[family]['received_prefixes'] = neighbor['received_prefixes']
            data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
            data[family]['sent_prefixes'] = neighbor['sent_prefixes']
        return data
@staticmethod
def _parse_value(value):
if isinstance(value, py23_compat.string_types):
return py23_compat.text_type(value)
elif value is None:
return u''
else:
return value
    def get_bgp_neighbors(self):
        """Return BGP neighbors details, grouped by routing instance.

        The 'master' instance is exposed under the key 'global'; instances
        with no established peers are filtered from the result.
        """
        bgp_neighbor_data = {}
        default_neighbor_details = {
            'local_as': 0,
            'remote_as': 0,
            'remote_id': '',
            'is_up': False,
            'is_enabled': False,
            'description': '',
            'uptime': 0,
            'address_family': {}
        }
        keys = default_neighbor_details.keys()
        uptime_table = junos_views.junos_bgp_uptime_table(self.device)
        bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
        uptime_table_lookup = {}

        def _get_uptime_table(instance):
            # memoise per-instance uptime lookups to avoid repeated RPCs
            if instance not in uptime_table_lookup:
                uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
            return uptime_table_lookup[instance]

        def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
            '''
            Make sure to execute a simple request whenever using
            junos > 13. This is a helper used to avoid code redundancy
            and reuse the function also when iterating through the list
            BGP neighbors under a specific routing instance,
            also when the device is capable to return the routing
            instance name at the BGP neighbor level.
            '''
            for bgp_neighbor in neighbor_data:
                # peer key may carry a "+port" suffix; keep only the IP
                peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
                neighbor_details = deepcopy(default_neighbor_details)
                neighbor_details.update(
                    {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
                )
                if not instance:
                    # not instance, means newer Junos version,
                    # as we request everything in a single request
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                    instance = peer_fwd_rti
                else:
                    # instance is explicitly requests,
                    # thus it's an old Junos, so we retrieve the BGP neighbors
                    # under a certain routing instance
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
                instance_name = 'global' if instance == 'master' else instance
                if instance_name not in bgp_neighbor_data:
                    bgp_neighbor_data[instance_name] = {}
                if 'router_id' not in bgp_neighbor_data[instance_name]:
                    # we only need to set this once
                    bgp_neighbor_data[instance_name]['router_id'] = \
                        py23_compat.text_type(neighbor_details.get('local_id', ''))
                peer = {
                    key: self._parse_value(value)
                    for key, value in neighbor_details.items()
                    if key in keys
                }
                peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
                peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
                peer['address_family'] = self._parse_route_stats(neighbor_details)
                if 'peers' not in bgp_neighbor_data[instance_name]:
                    bgp_neighbor_data[instance_name]['peers'] = {}
                bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
                if not uptime_table_items:
                    uptime_table_items = _get_uptime_table(instance)
                for neighbor, uptime in uptime_table_items:
                    if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                        bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                    bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]

        # Commenting out the following sections, till Junos
        # will provide a way to identify the routing instance name
        # from the details of the BGP neighbor
        # currently, there are Junos 15 version having a field called `peer_fwd_rti`
        # but unfortunately, this is not consistent.
        # Junos 17 might have this fixed, but this needs to be revisited later.
        # In the definition below, `old_junos` means a version that does not provide
        # the forwarding RTI information.
        #
        # old_junos = napalm_base.helpers.convert(
        #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15

        # if old_junos:
        instances = junos_views.junos_route_instance_table(self.device).get()
        for instance, instance_data in instances.items():
            if instance.startswith('__'):
                # junos internal instances
                continue
            bgp_neighbor_data[instance] = {'peers': {}}
            instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
            uptime_table_items = uptime_table.get(instance=instance).items()
            _get_bgp_neighbors_core(instance_neighbors,
                                    instance=instance,
                                    uptime_table_items=uptime_table_items)
        # If the OS provides the `peer_fwd_rti` or any way to identify the
        # routing instance name (see above), the performances of this getter
        # can be significantly improved, as we won't execute one request
        # for each and every RT.
        # However, this improvement would only be beneficial for multi-VRF envs.
        #
        # else:
        #     instance_neighbors = bgp_neighbors_table.get().items()
        #     _get_bgp_neighbors_core(instance_neighbors)
        # drop instances that ended up with no peers (e.g. the raw 'master'
        # entry whose peers were recorded under 'global')
        bgp_tmp_dict = {}
        for k, v in bgp_neighbor_data.items():
            if bgp_neighbor_data[k]['peers']:
                bgp_tmp_dict[k] = v
        return bgp_tmp_dict
def get_lldp_neighbors(self):
"""Return LLDP neighbors details."""
lldp = junos_views.junos_lldp_table(self.device)
try:
lldp.get()
except RpcError as rpcerr:
# this assumes the library runs in an environment
# able to handle logs
# otherwise, the user just won't see this happening
log.error('Unable to retrieve the LLDP neighbors information:')
log.error(rpcerr.message)
return {}
result = lldp.items()
neighbors = {}
for neigh in result:
if neigh[0] not in neighbors.keys():
neighbors[neigh[0]] = []
neighbors[neigh[0]].append({x[0]: py23_compat.text_type(x[1]) for x in neigh[1]})
return neighbors
def get_lldp_neighbors_detail(self, interface=''):
"""Detailed view of the LLDP neighbors."""
lldp_neighbors = {}
lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
try:
lldp_table.get()
except RpcError as rpcerr:
# this assumes the library runs in an environment
# able to handle logs
# otherwise, the user just won't see this happening
log.error('Unable to retrieve the LLDP neighbors information:')
log.error(rpcerr.message)
return {}
interfaces = lldp_table.get().keys()
# get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
# and SRX Series is get-lldp-interface-neighbors-information,
# and rpc for M, MX, and T Series is get-lldp-interface-neighbors
# ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
# ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
for interface in interfaces:
if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
lldp_table.get(interface_name=interface)
else:
lldp_table.get(interface_device=interface)
for item in lldp_table:
if interface not in lldp_neighbors.keys():
lldp_neighbors[interface] = []
lldp_neighbors[interface].append({
'parent_interface': item.parent_interface,
'remote_port': item.remote_port,
'remote_chassis_id': napalm_base.helpers.convert(
napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
'remote_port_description': napalm_base.helpers.convert(
py23_compat.text_type, item.remote_port_description),
'remote_system_name': item.remote_system_name,
'remote_system_description': item.remote_system_description,
'remote_system_capab': item.remote_system_capab,
'remote_system_enable_capab': item.remote_system_enable_capab
})
return lldp_neighbors
    def cli(self, commands):
        """Execute raw CLI commands and returns their output.

        :param commands: list of CLI command strings. Pipe modifiers
            (except/match/last/trim/count/find) are emulated locally when the
            device output still requires it, and blacklisted pipes ('save')
            are stripped before execution.
        :returns: dict mapping each original command to its output text.
        :raises TypeError: when *commands* is not a list.
        """
        cli_output = {}

        def _count(txt, none):  # Second arg for consistency only. noqa
            '''
            Return the exact output, as Junos displays
            e.g.:
            > show system processes extensive | match root | count
            Count: 113 lines
            '''
            count = len(txt.splitlines())
            return 'Count: {count} lines'.format(count=count)

        def _trim(txt, length):
            '''
            Trim specified number of columns from start of line.
            '''
            try:
                newlines = []
                for line in txt.splitlines():
                    newlines.append(line[int(length):])
                return '\n'.join(newlines)
            except ValueError:
                return txt

        def _except(txt, pattern):
            '''
            Show only text that does not match a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            unmatched = [
                line for line in txt.splitlines()
                if not re.search(rgx, line, re.I)
            ]
            return '\n'.join(unmatched)

        def _last(txt, length):
            '''
            Display end of output only.
            '''
            try:
                return '\n'.join(
                    txt.splitlines()[(-1)*int(length):]
                )
            except ValueError:
                return txt

        def _match(txt, pattern):
            '''
            Show only text that matches a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            matched = [
                line for line in txt.splitlines()
                if re.search(rgx, line, re.I)
            ]
            return '\n'.join(matched)

        def _find(txt, pattern):
            '''
            Search for first occurrence of pattern.
            '''
            rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
            match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
            if match:
                return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
            else:
                return '\nPattern not found'

        def _process_pipe(cmd, txt):
            '''
            Process CLI output from Juniper device that
            doesn't allow piping the output.
            '''
            # NOTE(review): when the device already returned output, it is
            # passed through untouched; the local emulation below only runs
            # when txt is None, in which case txt enters the pipeline as
            # None — verify upstream intent
            if txt is not None:
                return txt
            _OF_MAP = OrderedDict()
            _OF_MAP['except'] = _except
            _OF_MAP['match'] = _match
            _OF_MAP['last'] = _last
            _OF_MAP['trim'] = _trim
            _OF_MAP['count'] = _count
            _OF_MAP['find'] = _find
            # the operations order matter in this case!
            exploded_cmd = cmd.split('|')
            pipe_oper_args = {}
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0]  # always there
                pipe_args = ''.join(exploded_pipe[1:2])
                # will not throw error when there's no arg
                pipe_oper_args[pipe_oper] = pipe_args
            for oper in _OF_MAP.keys():
                # to make sure the operation sequence is correct
                if oper not in pipe_oper_args.keys():
                    continue
                txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
            return txt

        if not isinstance(commands, list):
            raise TypeError('Please enter a valid list of commands!')
        _PIPE_BLACKLIST = ['save']
        # Preprocessing to avoid forbidden commands
        for command in commands:
            exploded_cmd = command.split('|')
            command_safe_parts = []
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0]  # always there
                if pipe_oper in _PIPE_BLACKLIST:
                    # drop blacklisted pipes (e.g. "save") from the command
                    continue
                pipe_args = ''.join(exploded_pipe[1:2])
                safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                                  args=pipe_args)
                command_safe_parts.append(safe_pipe)
            safe_command = exploded_cmd[0] if not command_safe_parts else\
                '{base} | {pipes}'.format(base=exploded_cmd[0],
                                          pipes=' | '.join(command_safe_parts))
            raw_txt = self.device.cli(safe_command, warning=False)
            cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
                _process_pipe(command, raw_txt))
        return cli_output
def get_bgp_config(self, group='', neighbor=''):
    """
    Return the BGP configuration, keyed by group name.

    :param group: if set, return only the configuration of this BGP group.
    :param neighbor: if set, return only this neighbor inside the group
        (only honoured when ``group`` is also specified).
    """
    def update_dict(d, u):  # for deep dictionary update
        # Recursively merge mapping ``u`` into mapping ``d`` in place.
        for k, v in u.items():
            if isinstance(d, collections.Mapping):
                if isinstance(v, collections.Mapping):
                    r = update_dict(d.get(k, {}), v)
                    d[k] = r
                else:
                    d[k] = u[k]
            else:
                d = {k: u[k]}
        return d

    def build_prefix_limit(**args):
        """
        Transform the elements of a dictionary into nested dictionaries.

        Example:
            {
                'inet_unicast_limit': 500,
                'inet_unicast_teardown_threshold': 95,
                'inet_unicast_teardown_timeout': 5
            }

            becomes:

            {
                'inet': {
                    'unicast': {
                        'limit': 500,
                        'teardown': {
                            'threshold': 95,
                            'timeout': 5
                        }
                    }
                }
            }
        """
        prefix_limit = {}
        for key, value in args.items():
            key_levels = key.split('_')
            length = len(key_levels)-1
            temp_dict = {
                key_levels[length]: value
            }
            # wrap the innermost value one level per remaining name part
            for index in reversed(range(length)):
                level = key_levels[index]
                temp_dict = {level: temp_dict}
            update_dict(prefix_limit, temp_dict)
        return prefix_limit

    # Expected datatype per field, used both for casting the values
    # retrieved from the device and for choosing sane defaults.
    _COMMON_FIELDS_DATATYPE_ = {
        'description': py23_compat.text_type,
        'local_address': py23_compat.text_type,
        'local_as': int,
        'remote_as': int,
        'import_policy': py23_compat.text_type,
        'export_policy': py23_compat.text_type,
        'inet_unicast_limit_prefix_limit': int,
        'inet_unicast_teardown_threshold_prefix_limit': int,
        'inet_unicast_teardown_timeout_prefix_limit': int,
        'inet_unicast_novalidate_prefix_limit': int,
        'inet_flow_limit_prefix_limit': int,
        'inet_flow_teardown_threshold_prefix_limit': int,
        'inet_flow_teardown_timeout_prefix_limit': int,
        'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
        'inet6_unicast_limit_prefix_limit': int,
        'inet6_unicast_teardown_threshold_prefix_limit': int,
        'inet6_unicast_teardown_timeout_prefix_limit': int,
        'inet6_unicast_novalidate_prefix_limit': int,
        'inet6_flow_limit_prefix_limit': int,
        'inet6_flow_teardown_threshold_prefix_limit': int,
        'inet6_flow_teardown_timeout_prefix_limit': int,
        'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
    }

    _PEER_FIELDS_DATATYPE_MAP_ = {
        'authentication_key': py23_compat.text_type,
        'route_reflector_client': bool,
        'nhs': bool
    }
    _PEER_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )

    _GROUP_FIELDS_DATATYPE_MAP_ = {
        'type': py23_compat.text_type,
        'apply_groups': list,
        'remove_private_as': bool,
        'multipath': bool,
        'multihop_ttl': int
    }
    _GROUP_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )

    # Default value for each datatype, used when the device returns
    # nothing usable for a field.
    _DATATYPE_DEFAULT_ = {
        py23_compat.text_type: '',
        int: 0,
        bool: False,
        list: []
    }

    bgp_config = {}

    if group:
        bgp = junos_views.junos_bgp_config_group_table(self.device)
        bgp.get(group=group)
    else:
        bgp = junos_views.junos_bgp_config_table(self.device)
        bgp.get()
        neighbor = ''  # if no group is set, no neighbor should be set either
    bgp_items = bgp.items()
    if neighbor:
        neighbor_ip = napalm_base.helpers.ip(neighbor)

    for bgp_group in bgp_items:
        bgp_group_name = bgp_group[0]
        bgp_group_details = bgp_group[1]
        # start every group from the defaults; prefix-limit fields are
        # handled separately in a second pass below
        bgp_config[bgp_group_name] = {
            field: _DATATYPE_DEFAULT_.get(datatype)
            for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
            if '_prefix_limit' not in field
        }
        for elem in bgp_group_details:
            # first pass: only non-prefix-limit fields with real values
            if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                continue
            datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
            default = _DATATYPE_DEFAULT_.get(datatype)
            key = elem[0]
            value = elem[1]
            if key in ['export_policy', 'import_policy']:
                # multiple policies are joined in a single space-separated string
                if isinstance(value, list):
                    value = ' '.join(value)
            if key == 'local_address':
                value = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, value, value)
            if key == 'neighbors':
                # remember the neighbors sub-table; processed after the group
                bgp_group_peers = value
                continue
            bgp_config[bgp_group_name].update({
                key: napalm_base.helpers.convert(datatype, value, default)
            })
        prefix_limit_fields = {}
        for elem in bgp_group_details:
            # second pass: collect the prefix-limit fields and nest them
            if '_prefix_limit' in elem[0] and elem[1] is not None:
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                prefix_limit_fields.update({
                    elem[0].replace('_prefix_limit', ''):
                        napalm_base.helpers.convert(datatype, elem[1], default)
                })
        bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
        if 'multihop' in bgp_config[bgp_group_name].keys():
            # Delete 'multihop' key from the output
            del bgp_config[bgp_group_name]['multihop']
            if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                # Set ttl to default value 64
                bgp_config[bgp_group_name]['multihop_ttl'] = 64

        bgp_config[bgp_group_name]['neighbors'] = {}
        for bgp_group_neighbor in bgp_group_peers.items():
            bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
            if neighbor and bgp_peer_address != neighbor:
                continue  # if filters applied, jump over all other neighbors
            bgp_group_details = bgp_group_neighbor[1]
            bgp_peer_details = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                bgp_peer_details.update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
                bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['local_as'])
                bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['remote_as'])
                if key == 'cluster':
                    # a cluster-id on the peer makes it a route-reflector client
                    bgp_peer_details['route_reflector_client'] = True
                    # we do not want cluster in the output
                    del bgp_peer_details['cluster']
            if 'cluster' in bgp_config[bgp_group_name].keys():
                # group-level cluster-id also makes every peer an RR client
                bgp_peer_details['route_reflector_client'] = True
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
            if neighbor and bgp_peer_address == neighbor_ip:
                break  # found the desired neighbor
        if 'cluster' in bgp_config[bgp_group_name].keys():
            # we do not want cluster in the output
            del bgp_config[bgp_group_name]['cluster']
    return bgp_config
def get_bgp_neighbors_detail(self, neighbor_address=''):
    """Detailed view of the BGP neighbors operational data.

    Returns a dict keyed by routing-instance name, then by remote AS,
    each holding a list of per-neighbor detail dicts.
    """
    bgp_neighbors = {}
    # Template for every neighbor: fields that the device does not
    # report keep these default values.
    default_neighbor_details = {
        'up': False,
        'local_as': 0,
        'remote_as': 0,
        'router_id': u'',
        'local_address': u'',
        'routing_table': u'',
        'local_address_configured': False,
        'local_port': 0,
        'remote_address': u'',
        'remote_port': 0,
        'multihop': False,
        'multipath': False,
        'remove_private_as': False,
        'import_policy': u'',
        'export_policy': u'',
        'input_messages': -1,
        'output_messages': -1,
        'input_updates': -1,
        'output_updates': -1,
        'messages_queued_out': -1,
        'connection_state': u'',
        'previous_connection_state': u'',
        'last_event': u'',
        'suppress_4byte_as': False,
        'local_as_prepend': False,
        'holdtime': 0,
        'configured_holdtime': 0,
        'keepalive': 0,
        'configured_keepalive': 0,
        'active_prefix_count': -1,
        'received_prefix_count': -1,
        'accepted_prefix_count': -1,
        'suppressed_prefix_count': -1,
        'advertised_prefix_count': -1,
        'flap_count': 0
    }
    # Maps JunOS BGP option keywords (space-separated string in the XML)
    # to the boolean flags in the output dict.
    OPTION_KEY_MAP = {
        'RemovePrivateAS': 'remove_private_as',
        'Multipath': 'multipath',
        'Multihop': 'multihop',
        'AddressFamily': 'local_address_configured'
        # 'AuthKey' : 'authentication_key_set'
        # but other vendors do not specify if auth key is set
        # other options:
        # Preference, HoldTime, Ttl, LogUpDown, Refresh
    }

    def _bgp_iter_core(neighbor_data, instance=None):
        '''
        Iterate over a list of neighbors.

        For older junos, the routing instance is not specified inside the
        BGP neighbors XML, therefore we need to use a super sub-optimal structure
        as in get_bgp_neighbors: iterate through the list of network instances
        then execute one request for each and every routing instance.
        For newer junos, this is not necessary as the routing instance is available
        and we can get everything solve in a single request.
        '''
        for bgp_neighbor in neighbor_data:
            remote_as = int(bgp_neighbor[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # newer junos: instance name comes from the reply itself
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                # instance explicitly requested; drop the field if present
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            options = neighbor_details.pop('options', '')
            if isinstance(options, str):
                options_list = options.split()
                for option in options_list:
                    key = OPTION_KEY_MAP.get(option)
                    if key is not None:
                        neighbor_details[key] = True
            four_byte_as = neighbor_details.pop('4byte_as', 0)
            # local/peer addresses come as 'ip+port'; split them apart
            local_address = neighbor_details.pop('local_address', '')
            local_details = local_address.split('+')
            neighbor_details['local_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, local_details[0], local_details[0])
            if len(local_details) == 2:
                neighbor_details['local_port'] = int(local_details[1])
            else:
                neighbor_details['local_port'] = 179
            neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
            peer_address = neighbor_details.pop('peer_address', '')
            remote_details = peer_address.split('+')
            neighbor_details['remote_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, remote_details[0], remote_details[0])
            if len(remote_details) == 2:
                neighbor_details['remote_port'] = int(remote_details[1])
            else:
                neighbor_details['remote_port'] = 179
            neighbor_details['routing_table'] = instance_name
            neighbor_details['local_as'] = napalm_base.helpers.as_number(
                neighbor_details['local_as'])
            neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                neighbor_details['remote_as'])
            neighbors_rib = neighbor_details.pop('rib')
            neighbors_queue = neighbor_details.pop('queue')
            # sum the output queue across every RIB of this neighbor
            messages_queued_out = 0
            for queue_entry in neighbors_queue.items():
                messages_queued_out += queue_entry[1][0][1]
            neighbor_details['messages_queued_out'] = messages_queued_out
            if instance_name not in bgp_neighbors.keys():
                bgp_neighbors[instance_name] = {}
            if remote_as not in bgp_neighbors[instance_name].keys():
                bgp_neighbors[instance_name][remote_as] = []
            neighbor_rib_stats = neighbors_rib.items()
            if not neighbor_rib_stats:
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                continue  # no RIBs available, pass default details
            neighbor_rib_details = {
                'active_prefix_count': 0,
                'received_prefix_count': 0,
                'accepted_prefix_count': 0,
                'suppressed_prefix_count': 0,
                'advertised_prefix_count': 0
            }
            # aggregate the prefix counters across all RIBs
            for rib_entry in neighbor_rib_stats:
                for elem in rib_entry[1]:
                    if elem[1] is None:
                        neighbor_rib_details[elem[0]] += 0
                    else:
                        neighbor_rib_details[elem[0]] += elem[1]
            neighbor_details.update(neighbor_rib_details)
            bgp_neighbors[instance_name][remote_as].append(neighbor_details)

    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device)
    for instance, instance_data in instances.get().items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                neighbor_address=str(neighbor_address)).items()
        _bgp_iter_core(neighbor_data, instance=instance)
    # else:
    #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
    #     _bgp_iter_core(neighbor_data)
    return bgp_neighbors
def get_arp_table(self):
    """Return the ARP table as a list of entry dicts."""
    # The stock jnpr ArpTable is deliberately not used here: the custom
    # view additionally exposes the hostname/TTE fields and groups
    # entries by VLAN ID.
    table = junos_views.junos_arp_table(self.device)
    table.get()
    entries = []
    for _, fields in table.items():
        entry = dict(fields)
        entry['mac'] = napalm_base.helpers.mac(entry.get('mac'))
        entry['ip'] = napalm_base.helpers.ip(entry.get('ip'))
        entries.append(entry)
    return entries
def get_ntp_peers(self):
    """Return the configured NTP peers as a dict: peer IP -> empty dict."""
    peers_config = junos_views.junos_ntp_peers_config_table(self.device)
    peers_config.get()
    result = {}
    for peer in peers_config.items():
        result[napalm_base.helpers.ip(peer[0])] = {}
    return result
def get_ntp_stats(self):
    """Return NTP stats (associations).

    There is no structured XML RPC for ``show ntp associations``, so the
    raw CLI output is retrieved and parsed line by line with a regular
    expression.
    """
    ntp_stats = []
    # Raw string literals: sequences such as \s, \+ and \. are regex
    # syntax, not Python string escapes (non-raw strings here trigger
    # invalid-escape DeprecationWarnings on Python 3.6+).
    # Compiled once instead of re-matching the pattern string per line.
    regex = re.compile(
        r'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
        r'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
        r'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
        r'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
        r'\s+([0-9\.]+)\s?$',
        re.I
    )
    ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
    # skip the header lines of the CLI table
    for ntp_assoc_output_line in ntp_assoc_output.splitlines()[3:]:
        line_search = regex.search(ntp_assoc_output_line)
        if not line_search:
            continue  # pattern not found on this line
        line_groups = line_search.groups()
        try:
            ntp_stats.append({
                'remote': napalm_base.helpers.ip(line_groups[1]),
                'synchronized': (line_groups[0] == '*'),
                'referenceid': py23_compat.text_type(line_groups[2]),
                'stratum': int(line_groups[3]),
                'type': py23_compat.text_type(line_groups[4]),
                'when': py23_compat.text_type(line_groups[5]),
                'hostpoll': int(line_groups[6]),
                'reachability': int(line_groups[7]),
                'delay': float(line_groups[8]),
                'offset': float(line_groups[9]),
                'jitter': float(line_groups[10])
            })
        except Exception:
            continue  # malformed line: jump to next one
    return ntp_stats
def get_interfaces_ip(self):
    """Return the configured IP addresses, keyed by interface, then family."""
    interfaces_ip = {}
    interface_table = junos_views.junos_ip_interfaces_table(self.device)
    interface_table.get()
    interface_table_items = interface_table.items()

    # Map JunOS address-family names to the output family keys.
    _FAMILY_VMAP_ = {
        'inet': u'ipv4',
        'inet6': u'ipv6'
        # can add more mappings
    }
    # Fallback prefix length per family when the entry carries no mask.
    _FAMILY_MAX_PREFIXLEN = {
        'inet': 32,
        'inet6': 128
    }

    for interface_details in interface_table_items:
        # The table key is the address itself, e.g. '10.0.0.1/24'.
        ip_network = interface_details[0]
        ip_address = ip_network.split('/')[0]
        address = napalm_base.helpers.convert(
            napalm_base.helpers.ip, ip_address, ip_address)
        try:
            interface_details_dict = dict(interface_details[1])
            family_raw = interface_details_dict.get('family')
            interface = py23_compat.text_type(interface_details_dict.get('interface'))
        except ValueError:
            continue
        prefix = napalm_base.helpers.convert(int,
                                             ip_network.split('/')[-1],
                                             _FAMILY_MAX_PREFIXLEN.get(family_raw))
        family = _FAMILY_VMAP_.get(family_raw)
        if not family or not interface:
            continue  # unsupported family or missing interface name
        if interface not in interfaces_ip.keys():
            interfaces_ip[interface] = {}
        if family not in interfaces_ip[interface].keys():
            interfaces_ip[interface][family] = {}
        if address not in interfaces_ip[interface][family].keys():
            interfaces_ip[interface][family][address] = {}
        interfaces_ip[interface][family][address][u'prefix_length'] = prefix

    return interfaces_ip
def get_mac_address_table(self):
    """Return the MAC address table of the device."""
    # Select the view matching the platform: EX/QFX switches expose a
    # different RPC than routers, and L2NG switches yet another one.
    if self.device.facts.get('personality', '') in ['SWITCH']:
        if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']:
            mac_table = junos_views.junos_mac_address_table_switch_l2ng(self.device)
        else:
            mac_table = junos_views.junos_mac_address_table_switch(self.device)
    else:
        mac_table = junos_views.junos_mac_address_table(self.device)
    mac_table.get()
    defaults = {
        'mac': u'',
        'interface': u'',
        'vlan': 0,
        'static': False,
        'active': True,
        'moves': 0,
        'last_move': 0.0
    }
    mac_address_table = []
    for _, fields in mac_table.items():
        entry = defaults.copy()
        entry.update(dict(fields))
        if entry.get('mac') == '*':
            # JUNOS returns '*' for Type = Flood; not a real MAC entry
            continue
        entry['mac'] = napalm_base.helpers.mac(entry.get('mac'))
        mac_address_table.append(entry)
    return mac_address_table
def get_route_to(self, destination='', protocol=''):
    """Return route details to a specific destination, learned from a certain protocol.

    :param destination: prefix to look up (mandatory in practice).
    :param protocol: restrict to routes learned via this protocol.
    :raises TypeError: when ``destination`` is not a string.
    :raises CommandTimeoutException: when the lookup is too generic and
        the RPC times out.
    :raises CommandErrorException: on any other RPC failure.
    """
    routes = {}
    if not isinstance(destination, py23_compat.string_types):
        raise TypeError('Please specify a valid destination!')
    if protocol and isinstance(destination, py23_compat.string_types):
        protocol = protocol.lower()
    if protocol == 'connected':
        protocol = 'direct'  # this is how is called on JunOS
    _COMMON_PROTOCOL_FIELDS_ = [
        'destination',
        'prefix_length',
        'protocol',
        'current_active',
        'last_active',
        'age',
        'next_hop',
        'outgoing_interface',
        'selected_next_hop',
        'preference',
        'inactive_reason',
        'routing_table'
    ]  # identifies the list of fields common for all protocols
    _BOOLEAN_FIELDS_ = [
        'current_active',
        'selected_next_hop',
        'last_active'
    ]  # fields expected to have boolean values
    _PROTOCOL_SPECIFIC_FIELDS_ = {
        'bgp': [
            'local_as',
            'remote_as',
            'as_path',
            'communities',
            'local_preference',
            'preference2',
            'remote_address',
            'metric',
            'metric2'
        ],
        'isis': [
            'level',
            'metric',
            'local_as'
        ]
    }
    routes_table = junos_views.junos_protocol_route_table(self.device)
    rt_kargs = {
        'destination': destination
    }
    if protocol and isinstance(destination, py23_compat.string_types):
        rt_kargs['protocol'] = protocol
    try:
        routes_table.get(**rt_kargs)
    except RpcTimeoutError:
        # on devices with milions of routes
        # in case the destination is too generic (e.g.: 10/8)
        # will take very very long to determine all routes and
        # moreover will return a huge list
        raise CommandTimeoutException(
            'Too many routes returned! Please try with a longer prefix or a specific protocol!'
        )
    except RpcError as rpce:
        if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
            raise CommandErrorException(
                'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
        raise CommandErrorException(rpce)
    except Exception as err:
        raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))
    routes_items = routes_table.items()

    for route in routes_items:
        d = {}
        # next_hop = route[0]
        d = {elem[0]: elem[1] for elem in route[1]}
        destination = napalm_base.helpers.ip(d.pop('destination', ''))
        prefix_length = d.pop('prefix_length', 32)
        destination = '{d}/{p}'.format(
            d=destination,
            p=prefix_length
        )
        # missing boolean fields default to False
        d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
        as_path = d.get('as_path')
        if as_path is not None:
            d['as_path'] = as_path.split(' I ')[0]\
                .replace('AS path:', '')\
                .replace('I', '')\
                .strip()
            # to be sure that contains only AS Numbers
        if d.get('inactive_reason') is None:
            d['inactive_reason'] = u''
        route_protocol = d.get('protocol').lower()
        if protocol and protocol != route_protocol:
            continue
        communities = d.get('communities')
        if communities is not None and type(communities) is not list:
            d['communities'] = [communities]
        d_keys = list(d.keys())
        # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
        all_protocol_attributes = {
            key: d.pop(key)
            for key in d_keys
            if key not in _COMMON_PROTOCOL_FIELDS_
        }
        protocol_attributes = {
            key: value for key, value in all_protocol_attributes.items()
            if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
        }
        d['protocol_attributes'] = protocol_attributes
        if destination not in routes.keys():
            routes[destination] = []
        routes[destination].append(d)

    return routes
def get_snmp_information(self):
    """Return the SNMP configuration, including the community table."""
    snmp_config = junos_views.junos_snmp_config_table(self.device)
    snmp_config.get()
    snmp_items = snmp_config.items()
    if not snmp_items:
        return {}
    # flatten the single-row config table into a plain dict
    snmp_information = {}
    for field, value in snmp_items[0][1]:
        snmp_information[py23_compat.text_type(field)] = value if value else ''
    snmp_information['community'] = {}
    communities_table = snmp_information.pop('communities_table')
    if not communities_table:
        return snmp_information
    for community_name, community_fields in communities_table.items():
        details = {'acl': ''}
        for field, value in community_fields:
            if field == 'mode':
                # translate the JunOS authorization mode to the NAPALM one
                value = C.SNMP_AUTHORIZATION_MODE_MAP.get(value)
            details[py23_compat.text_type(field)] = py23_compat.text_type(value)
        snmp_information['community'][py23_compat.text_type(community_name)] = details
    return snmp_information
def get_probes_config(self):
    """Return the configuration of the RPM probes, keyed by probe then test."""
    probes = {}
    config_table = junos_views.junos_rpm_probes_config_table(self.device)
    config_table.get()
    for test, fields in config_table.items():
        test_name = py23_compat.text_type(test)
        details = dict(fields)
        probe_name = napalm_base.helpers.convert(
            py23_compat.text_type, details.pop('probe_name'))
        probes.setdefault(probe_name, {})[test_name] = {
            'probe_type': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('probe_type', '')),
            'target': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('target', '')),
            'source': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('source_address', '')),
            'probe_count': napalm_base.helpers.convert(
                int, details.pop('probe_count', '0')),
            'test_interval': napalm_base.helpers.convert(
                int, details.pop('test_interval', '0'))
        }
    return probes
def get_probes_results(self):
    """Return the results of the RPM probes, keyed by probe then test."""
    probes_results = {}
    results_table = junos_views.junos_rpm_probes_results_table(self.device)
    results_table.get()
    for probe, fields in results_table.items():
        probe_name = py23_compat.text_type(probe)
        test_results = dict(fields)
        test_results['last_test_loss'] = napalm_base.helpers.convert(
            int, test_results.pop('last_test_loss'), 0)
        # the device reports timings in microseconds; convert to milliseconds
        for param_name, param_value in test_results.items():
            if isinstance(param_value, float):
                test_results[param_name] = param_value * 1e-3
        test_name = test_results.pop('test_name', '')
        if test_results.get('source', u'') is None:
            test_results['source'] = u''
        probes_results.setdefault(probe_name, {})[test_name] = test_results
    return probes_results
def traceroute(self,
               destination,
               source=C.TRACEROUTE_SOURCE,
               ttl=C.TRACEROUTE_TTL,
               timeout=C.TRACEROUTE_TIMEOUT,
               vrf=C.TRACEROUTE_VRF):
    """Execute traceroute and return results.

    :param destination: host to traceroute to.
    :param source: optional source address.
    :param ttl: optional max TTL.
    :param timeout: optional per-probe wait, in seconds.
    :param vrf: optional routing-instance name.
    :return: dict with either an 'error' key or a 'success' key mapping
        TTL value -> probes -> {ip_address, host_name, rtt}.
    """
    traceroute_result = {}

    # calling form RPC does not work properly :(
    # but defined junos_route_instance_table just in case

    # build the optional parts of the CLI command
    source_str = ''
    maxttl_str = ''
    wait_str = ''
    vrf_str = ''

    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        wait_str = ' wait {timeout}'.format(timeout=timeout)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)

    traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
        destination=destination,
        source=source_str,
        maxttl=maxttl_str,
        wait=wait_str,
        vrf=vrf_str
    )

    traceroute_rpc = E('command', traceroute_command)
    rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    traceroute_results = rpc_reply.find('.//traceroute-results')

    traceroute_failure = napalm_base.helpers.find_txt(
        traceroute_results, 'traceroute-failure', '')
    error_message = napalm_base.helpers.find_txt(
        traceroute_results, 'rpc-error/error-message', '')

    if traceroute_failure and error_message:
        return {'error': '{}: {}'.format(traceroute_failure, error_message)}

    traceroute_result['success'] = {}
    for hop in traceroute_results.findall('hop'):
        ttl_value = napalm_base.helpers.convert(
            int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
        if ttl_value not in traceroute_result['success']:
            traceroute_result['success'][ttl_value] = {'probes': {}}
        for probe in hop.findall('probe-result'):
            probe_index = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
            host_name = py23_compat.text_type(
                napalm_base.helpers.find_txt(probe, 'host-name', '*'))
            # rtt is reported in microseconds; convert to milliseconds
            rtt = napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3  # ms
            traceroute_result['success'][ttl_value]['probes'][probe_index] = {
                'ip_address': ip_address,
                'host_name': host_name,
                'rtt': rtt
            }

    return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
         timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
    """Execute ping towards ``destination`` and return the parsed results.

    :param destination: host to ping.
    :param source: optional source address.
    :param ttl: optional TTL value.
    :param timeout: optional per-probe wait, in seconds.
    :param size: optional packet size.
    :param count: optional number of probes.
    :param vrf: optional routing-instance name.
    :return: dict with a 'success' key (probe counters, rtt statistics
        and per-probe results) or an 'error' key when the RPC failed or
        all probes were lost.
    """
    ping_dict = {}

    # build the optional parts of the CLI command
    source_str = ''
    maxttl_str = ''
    timeout_str = ''
    size_str = ''
    count_str = ''
    vrf_str = ''

    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        timeout_str = ' wait {timeout}'.format(timeout=timeout)
    if size:
        size_str = ' size {size}'.format(size=size)
    if count:
        count_str = ' count {count}'.format(count=count)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)

    ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
        destination=destination,
        source=source_str,
        ttl=maxttl_str,
        timeout=timeout_str,
        size=size_str,
        count=count_str,
        vrf=vrf_str
    )

    ping_rpc = E('command', ping_command)
    rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    probe_summary = rpc_reply.find('.//probe-results-summary')

    if probe_summary is None:
        rpc_error = rpc_reply.find('.//rpc-error')
        return {'error': '{}'.format(
            napalm_base.helpers.find_txt(rpc_error, 'error-message'))}

    packet_loss = napalm_base.helpers.convert(
        int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)

    # rtt values are valid only if we got at least one ICMP reply.
    # NOTE: '!=' (value comparison), not 'is not' -- identity comparison
    # on ints only happens to work via CPython's small-int caching.
    if packet_loss != 100:
        ping_dict['success'] = {}
        ping_dict['success']['probes_sent'] = int(
            probe_summary.findtext("probes-sent"))
        ping_dict['success']['packet_loss'] = packet_loss
        # rtt-* fields are reported in microseconds; convert to ms
        ping_dict['success'].update({
            'rtt_min': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
            'rtt_max': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
            'rtt_avg': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-average'), -1) * 1e-3), 3),
            'rtt_stddev': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
        })
        tmp = rpc_reply.find('.//ping-results')
        results_array = []
        for probe_result in tmp.findall('probe-result'):
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip,
                napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
            rtt = round(
                (napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(
                        probe_result, 'rtt'), -1) * 1e-3), 3)
            results_array.append({'ip_address': ip_address,
                                  'rtt': rtt})
        ping_dict['success'].update({'results': results_array})
    else:
        return {'error': 'Packet loss {}'.format(packet_loss)}

    return ping_dict
def get_users(self):
    """Return the local users configured on the device."""
    # Translate a JunOS login class into a Cisco-style privilege level,
    # which is what the NAPALM API expects.
    _CLASS_TO_PRIVILEGE_LEVEL = {
        'super-user': 15,
        'superuser': 15,
        'operator': 5,
        'read-only': 1,
        'unauthorized': 0
    }
    users = {}
    users_table = junos_views.junos_users_table(self.device)
    users_table.get()
    for username, fields in users_table.items():
        details = {
            'level': 0,
            'password': '',
            'sshkeys': []
        }
        # keep only the fields that actually carry a value
        details.update({field: value for field, value in fields if value})
        user_class = details.pop('class', '')
        # stringify every remaining value, then restore the typed fields
        details = {
            key: py23_compat.text_type(details[key])
            for key in details.keys()
        }
        details['level'] = _CLASS_TO_PRIVILEGE_LEVEL.get(user_class, 0)
        details['sshkeys'] = [
            details.pop(key)
            for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
            if details.get(key, '')
        ]
        users[username] = details
    return users
def get_optics(self):
    """Return optics information, keyed by interface name."""
    optics_table = junos_views.junos_intf_optics_table(self.device)
    optics_table.get()
    optics_items = optics_table.items()

    # optics_items has no lane information, so we need to re-format data
    # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
    # but the information for 40G/100G is incorrect at this point
    # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
    optics_items_with_lane = []
    for intf_optic_item in optics_items:
        temp_list = list(intf_optic_item)
        temp_list.insert(1, u"0")
        new_intf_optic_item = tuple(temp_list)
        optics_items_with_lane.append(new_intf_optic_item)

    # Now optics_items_with_lane has all optics with lane 0 included
    # Example: ('xe-0/0/0', u'0', [ optical_values ])

    # Get optical information for 40G/100G optics
    optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
    optics_table40G.get()
    optics_40Gitems = optics_table40G.items()

    # Re-format data as before inserting lane value
    new_optics_40Gitems = []
    for item in optics_40Gitems:
        # here the table key is the lane; the interface name is the
        # first field inside the value list
        lane = item[0]
        iface = item[1].pop(0)
        new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))

    # New_optics_40Gitems contains 40G/100G optics only:
    # ('et-0/0/49', u'0', [ optical_values ]),
    # ('et-0/0/49', u'1', [ optical_values ]),
    # ('et-0/0/49', u'2', [ optical_values ])

    # Remove 40G/100G optics entries with wrong information returned
    # from junos_intf_optics_table()
    iface_40G = [item[0] for item in new_optics_40Gitems]
    for intf_optic_item in optics_items_with_lane:
        iface_name = intf_optic_item[0]
        if iface_name not in iface_40G:
            new_optics_40Gitems.append(intf_optic_item)

    # New_optics_40Gitems contains all optics 10G/40G/100G with the lane
    optics_detail = {}
    for intf_optic_item in new_optics_40Gitems:
        lane = intf_optic_item[1]
        interface_name = py23_compat.text_type(intf_optic_item[0])
        optics = dict(intf_optic_item[2])
        if interface_name not in optics_detail:
            optics_detail[interface_name] = {}
            optics_detail[interface_name]['physical_channels'] = {}
            optics_detail[interface_name]['physical_channels']['channel'] = []

        # Defaulting avg, min, max values to 0.0 since device does not
        # return these values
        intf_optics = {
            'index': int(lane),
            'state': {
                'input_power': {
                    'instant': (
                        float(optics['input_power'])
                        if optics['input_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'output_power': {
                    'instant': (
                        float(optics['output_power'])
                        if optics['output_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'laser_bias_current': {
                    'instant': (
                        float(optics['laser_bias_current'])
                        if optics['laser_bias_current'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                }
            }
        }
        optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)

    return optics_detail
def get_config(self, retrieve='all'):
    """Return the device configuration.

    :param retrieve: 'running', 'candidate' or 'all'. 'startup' is
        always returned empty, as JunOS has no separate startup config.
    """
    config = {
        'startup': '',
        'running': '',
        'candidate': ''
    }
    options = {
        'format': 'text',
        'database': 'candidate'
    }
    if retrieve in ('candidate', 'all'):
        candidate = self.device.rpc.get_config(filter_xml=None, options=options)
        config['candidate'] = py23_compat.text_type(candidate.text)
    if retrieve in ('running', 'all'):
        options['database'] = 'committed'
        running = self.device.rpc.get_config(filter_xml=None, options=options)
        config['running'] = py23_compat.text_type(running.text)
    return config
    def get_network_instances(self, name=''):
        """
        Return routing instances in OpenConfig network-instance layout.

        :param name: when non-empty, return only that instance (or ``{}`` if
            it does not exist on the device).

        Interfaces not bound to any explicit instance are collected under a
        synthesised 'default' instance.
        """
        network_instances = {}
        ri_table = junos_views.junos_nw_instances_table(self.device)
        ri_table.get()
        ri_entries = ri_table.items()
        vrf_interfaces = []
        for ri_entry in ri_entries:
            ri_name = py23_compat.text_type(ri_entry[0])
            ri_details = {
                d[0]: d[1] for d in ri_entry[1]
            }
            ri_type = ri_details['instance_type']
            if ri_type is None:
                # Instances without an explicit instance-type are the default.
                ri_type = 'default'
            ri_rd = ri_details['route_distinguisher']
            ri_interfaces = ri_details['interfaces']
            if not isinstance(ri_interfaces, list):
                # A single bound interface comes back as a scalar, not a list.
                ri_interfaces = [ri_interfaces]
            network_instances[ri_name] = {
                'name': ri_name,
                'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
                'state': {
                    'route_distinguisher': ri_rd if ri_rd else ''
                },
                'interfaces': {
                    'interface': {
                        intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                    }
                }
            }
            vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
        # Everything not claimed by an explicit instance belongs to 'default'.
        all_interfaces = self.get_interfaces().keys()
        default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
        if 'default' not in network_instances:
            network_instances['default'] = {
                'name': 'default',
                'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
                'state': {
                    'route_distinguisher': ''
                },
                'interfaces': {
                    'interface': {
                        py23_compat.text_type(intrf_name): {}
                        for intrf_name in default_interfaces
                    }
                }
            }
        if not name:
            return network_instances
        if name not in network_instances:
            return {}
        return {name: network_instances[name]}
|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver.get_ntp_stats | python | def get_ntp_stats(self):
# NTP Peers does not have XML RPC defined
# thus we need to retrieve raw text and parse...
# :(
ntp_stats = []
REGEX = (
'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
'\s+([0-9\.]+)\s?$'
)
ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
ntp_assoc_output_lines = ntp_assoc_output.splitlines()
for ntp_assoc_output_line in ntp_assoc_output_lines[3:]: # except last line
line_search = re.search(REGEX, ntp_assoc_output_line, re.I)
if not line_search:
continue # pattern not found
line_groups = line_search.groups()
try:
ntp_stats.append({
'remote': napalm_base.helpers.ip(line_groups[1]),
'synchronized': (line_groups[0] == '*'),
'referenceid': py23_compat.text_type(line_groups[2]),
'stratum': int(line_groups[3]),
'type': py23_compat.text_type(line_groups[4]),
'when': py23_compat.text_type(line_groups[5]),
'hostpoll': int(line_groups[6]),
'reachability': int(line_groups[7]),
'delay': float(line_groups[8]),
'offset': float(line_groups[9]),
'jitter': float(line_groups[10])
})
except Exception:
continue # jump to next line
return ntp_stats | Return NTP stats (associations). | train | https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L1249-L1290 | null | class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
"""
Initialise JunOS driver.
Optional args:
* config_lock (True/False): lock configuration DB after the connection is established.
* port (int): custom port
* key_file (string): SSH key file path
* keepalive (int): Keepalive interval
* ignore_warning (boolean): not generate warning exceptions
"""
self.hostname = hostname
self.username = username
self.password = password
self.timeout = timeout
self.config_replace = False
self.locked = False
# Get optional arguments
if optional_args is None:
optional_args = {}
self.config_lock = optional_args.get('config_lock', False)
self.port = optional_args.get('port', 22)
self.key_file = optional_args.get('key_file', None)
self.keepalive = optional_args.get('keepalive', 30)
self.ssh_config_file = optional_args.get('ssh_config_file', None)
self.ignore_warning = optional_args.get('ignore_warning', False)
if self.key_file:
self.device = Device(hostname,
user=username,
password=password,
ssh_private_key_file=self.key_file,
ssh_config=self.ssh_config_file,
port=self.port)
else:
self.device = Device(hostname,
user=username,
password=password,
port=self.port,
ssh_config=self.ssh_config_file)
self.profile = ["junos"]
    def open(self):
        """Open the NETCONF connection with the device and bind the config utility."""
        try:
            self.device.open()
        except ConnectTimeoutError as cte:
            # NOTE(review): `cte.message` is a Python 2-ism; confirm the PyEZ
            # exception class still exposes it on Python 3.
            raise ConnectionException(cte.message)
        # Session settings are applied only after a successful connect.
        self.device.timeout = self.timeout
        self.device._conn._session.transport.set_keepalive(self.keepalive)
        if hasattr(self.device, "cu"):
            # make sure to remove the cu attr from previous session
            # ValueError: requested attribute name cu already exists
            del self.device.cu
        # Bind a Config utility so commit/rollback/diff helpers can use it.
        self.device.bind(cu=Config)
        if self.config_lock:
            self._lock()
def close(self):
"""Close the connection."""
if self.config_lock:
self._unlock()
self.device.close()
def _lock(self):
"""Lock the config DB."""
if not self.locked:
self.device.cu.lock()
self.locked = True
def _unlock(self):
"""Unlock the config DB."""
if self.locked:
self.device.cu.unlock()
self.locked = False
def _rpc(self, get, child=None, **kwargs):
"""
This allows you to construct an arbitrary RPC call to retreive common stuff. For example:
Configuration: get: "<get-configuration/>"
Interface information: get: "<get-interface-information/>"
A particular interfacece information:
get: "<get-interface-information/>"
child: "<interface-name>ge-0/0/0</interface-name>"
"""
rpc = etree.fromstring(get)
if child:
rpc.append(etree.fromstring(child))
response = self.device.execute(rpc)
return etree.tostring(response)
def is_alive(self):
# evaluate the state of the underlying SSH connection
# and also the NETCONF status from PyEZ
return {
'is_alive': self.device._conn._session.transport.is_active() and self.device.connected
}
@staticmethod
def _is_json_format(config):
try:
_ = json.loads(config) # noqa
except (TypeError, ValueError):
return False
return True
def _detect_config_format(self, config):
fmt = 'text'
set_action_matches = [
'set',
'activate',
'deactivate',
'annotate',
'copy',
'delete',
'insert',
'protect',
'rename',
'unprotect',
]
if config.strip().startswith('<'):
return 'xml'
elif config.strip().split(' ')[0] in set_action_matches:
return 'set'
elif self._is_json_format(config):
return 'json'
return fmt
    def _load_candidate(self, filename, config, overwrite):
        """
        Load a candidate configuration from a file or an in-memory string.

        :param filename: path to a local file; takes precedence over `config`.
        :param config: configuration text used when `filename` is None.
        :param overwrite: passed through to PyEZ load() (replace vs merge).
        :raises ReplaceConfigException/MergeConfigException: on load failure,
            matching the pending operation selected by self.config_replace.
        """
        if filename is None:
            configuration = config
        else:
            with open(filename) as f:
                configuration = f.read()
        if not self.config_lock:
            # if not locked during connection time
            # will try to lock it if not already acquired
            self._lock()
            # and the device will be locked till first commit/rollback
        try:
            fmt = self._detect_config_format(configuration)
            if fmt == "xml":
                # PyEZ expects an lxml element rather than raw XML text.
                configuration = etree.XML(configuration)
            self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                                ignore_warning=self.ignore_warning)
        except ConfigLoadError as e:
            # Re-raise with the NAPALM exception type matching the pending
            # operation so callers can distinguish replace from merge errors.
            if self.config_replace:
                raise ReplaceConfigException(e.errs)
            else:
                raise MergeConfigException(e.errs)
def load_replace_candidate(self, filename=None, config=None):
"""Open the candidate config and merge."""
self.config_replace = True
self._load_candidate(filename, config, True)
def load_merge_candidate(self, filename=None, config=None):
"""Open the candidate config and replace."""
self.config_replace = False
self._load_candidate(filename, config, False)
def compare_config(self):
"""Compare candidate config with running."""
diff = self.device.cu.diff()
if diff is None:
return ''
else:
return diff.strip()
def commit_config(self):
"""Commit configuration."""
self.device.cu.commit(ignore_warning=self.ignore_warning)
if not self.config_lock:
self._unlock()
def discard_config(self):
"""Discard changes (rollback 0)."""
self.device.cu.rollback(rb_id=0)
if not self.config_lock:
self._unlock()
def rollback(self):
"""Rollback to previous commit."""
self.device.cu.rollback(rb_id=1)
self.commit_config()
def get_facts(self):
"""Return facts of the device."""
output = self.device.facts
uptime = self.device.uptime or -1
interfaces = junos_views.junos_iface_table(self.device)
interfaces.get()
interface_list = interfaces.keys()
return {
'vendor': u'Juniper',
'model': py23_compat.text_type(output['model']),
'serial_number': py23_compat.text_type(output['serialnumber']),
'os_version': py23_compat.text_type(output['version']),
'hostname': py23_compat.text_type(output['hostname']),
'fqdn': py23_compat.text_type(output['fqdn']),
'uptime': uptime,
'interface_list': interface_list
}
    def get_interfaces(self):
        """
        Return interfaces details.

        Speed is reported in Mbps; -1 means the device returned no parsable
        speed. Missing description/last-flapped values default to '' / -1.
        """
        result = {}
        interfaces = junos_views.junos_iface_table(self.device)
        interfaces.get()
        # convert all the tuples to our pre-defined dict structure
        for iface in interfaces.keys():
            result[iface] = {
                'is_up': interfaces[iface]['is_up'],
                'is_enabled': interfaces[iface]['is_enabled'],
                'description': (interfaces[iface]['description'] or u''),
                'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
                'mac_address': napalm_base.helpers.convert(
                    napalm_base.helpers.mac,
                    interfaces[iface]['mac_address'],
                    py23_compat.text_type(interfaces[iface]['mac_address'])),
                'speed': -1
            }
            # The table reports speed as e.g. '1000mbps' or '10Gbps'; pull
            # out the numeric part and an optional unit suffix.
            match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
            if match is None:
                continue
            speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
            if speed_value == -1:
                continue
            speed_unit = match.group(2)
            if speed_unit.lower() == 'gbps':
                # Normalise Gbps to Mbps.
                speed_value *= 1000
            result[iface]['speed'] = speed_value
        return result
def get_interfaces_counters(self):
"""Return interfaces counters."""
query = junos_views.junos_iface_counter_table(self.device)
query.get()
interface_counters = {}
for interface, counters in query.items():
interface_counters[interface] = {k: v if v is not None else -1 for k, v in counters}
return interface_counters
    def get_environment(self):
        """
        Return environment details: power, fans, temperature, CPU and memory.

        Built from four PyEZ tables; power data may later be corrected with
        PEM information when the platform supports that RPC.
        """
        environment = junos_views.junos_enviroment_table(self.device)
        routing_engine = junos_views.junos_routing_engine_table(self.device)
        temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
        power_supplies = junos_views.junos_pem_table(self.device)
        environment.get()
        routing_engine.get()
        temperature_thresholds.get()
        environment_data = {}
        current_class = None
        for sensor_object, object_data in environment.items():
            structured_object_data = {k: v for k, v in object_data}
            if structured_object_data['class']:
                # If current object has a 'class' defined, store it for use
                # on subsequent unlabeled lines.
                current_class = structured_object_data['class']
            else:
                # Juniper doesn't label the 2nd+ lines of a given class with a
                # class name. In that case, we use the most recent class seen.
                structured_object_data['class'] = current_class
            if structured_object_data['class'] == 'Power':
                # Create a dict for the 'power' key
                try:
                    environment_data['power'][sensor_object] = {}
                except KeyError:
                    environment_data['power'] = {}
                    environment_data['power'][sensor_object] = {}
                environment_data['power'][sensor_object]['capacity'] = -1.0
                environment_data['power'][sensor_object]['output'] = -1.0
            if structured_object_data['class'] == 'Fans':
                # Create a dict for the 'fans' key
                try:
                    environment_data['fans'][sensor_object] = {}
                except KeyError:
                    environment_data['fans'] = {}
                    environment_data['fans'][sensor_object] = {}
            status = structured_object_data['status']
            env_class = structured_object_data['class']
            if (status == 'OK' and env_class == 'Power'):
                # If status is Failed, Absent or Testing, set status to False.
                environment_data['power'][sensor_object]['status'] = True
            elif (status != 'OK' and env_class == 'Power'):
                environment_data['power'][sensor_object]['status'] = False
            elif (status == 'OK' and env_class == 'Fans'):
                # If status is Failed, Absent or Testing, set status to False.
                environment_data['fans'][sensor_object]['status'] = True
            elif (status != 'OK' and env_class == 'Fans'):
                environment_data['fans'][sensor_object]['status'] = False
            # NOTE(review): the threshold table is re-scanned for every
            # environment row, and the 'Temp' check below uses the OUTER
            # row's class -- works, but O(rows * thresholds); verify intent
            # before refactoring.
            for temperature_object, temperature_data in temperature_thresholds.items():
                structured_temperature_data = {k: v for k, v in temperature_data}
                if structured_object_data['class'] == 'Temp':
                    # Create a dict for the 'temperature' key
                    try:
                        environment_data['temperature'][sensor_object] = {}
                    except KeyError:
                        environment_data['temperature'] = {}
                        environment_data['temperature'][sensor_object] = {}
                    # Check we have a temperature field in this class (See #66)
                    if structured_object_data['temperature']:
                        environment_data['temperature'][sensor_object]['temperature'] = \
                            float(structured_object_data['temperature'])
                    # Set a default value (False) to the key is_critical and is_alert
                    environment_data['temperature'][sensor_object]['is_alert'] = False
                    environment_data['temperature'][sensor_object]['is_critical'] = False
                    # Check if the working temperature is equal to or higher than alerting threshold
                    temp = structured_object_data['temperature']
                    if structured_temperature_data['red-alarm'] <= temp:
                        environment_data['temperature'][sensor_object]['is_critical'] = True
                        environment_data['temperature'][sensor_object]['is_alert'] = True
                    elif structured_temperature_data['yellow-alarm'] <= temp:
                        environment_data['temperature'][sensor_object]['is_alert'] = True
        # Try to correct Power Supply information
        pem_table = dict()
        try:
            power_supplies.get()
        except RpcError:
            # Not all platforms have support for this
            pass
        else:
            # Format PEM information and correct capacity and output values
            # NOTE(review): assumes every PEM name was already seen in the
            # environment table above ('power' key exists) -- confirm.
            for pem in power_supplies.items():
                pem_name = pem[0].replace("PEM", "Power Supply")
                pem_table[pem_name] = dict(pem[1])
                environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
                environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
        for routing_engine_object, routing_engine_data in routing_engine.items():
            structured_routing_engine_data = {k: v for k, v in routing_engine_data}
            # Create dicts for 'cpu' and 'memory'.
            try:
                environment_data['cpu'][routing_engine_object] = {}
                environment_data['memory'] = {}
            except KeyError:
                environment_data['cpu'] = {}
                environment_data['cpu'][routing_engine_object] = {}
                environment_data['memory'] = {}
            # Calculate the CPU usage by using the CPU idle value.
            environment_data['cpu'][routing_engine_object]['%usage'] = \
                100.0 - structured_routing_engine_data['cpu-idle']
            try:
                environment_data['memory']['available_ram'] = \
                    int(structured_routing_engine_data['memory-dram-size'])
            except ValueError:
                # Some platforms return e.g. '2048 MB'; keep the digits only.
                environment_data['memory']['available_ram'] = \
                    int(
                        ''.join(
                            i for i in structured_routing_engine_data['memory-dram-size']
                            if i.isdigit()
                        )
                    )
            # Junos gives us RAM in %, so calculation has to be made.
            # Sadly, because of this, results are not 100% accurate to the truth.
            environment_data['memory']['used_ram'] = \
                int(round(environment_data['memory']['available_ram'] / 100.0 *
                          structured_routing_engine_data['memory-buffer-utilization']))
        return environment_data
@staticmethod
def _get_address_family(table):
"""
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
"""
address_family_mapping = {
'inet': 'ipv4',
'inet6': 'ipv6',
'inetflow': 'flow'
}
family = table.split('.')[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = family
return address_family
    def _parse_route_stats(self, neighbor):
        """
        Normalise per-address-family prefix counters for a BGP neighbor.

        Returns a dict keyed by family ('ipv4', 'ipv6', ...) with
        received/accepted/sent prefix counts; -1 means unknown (session down).
        """
        data = {
            'ipv4': {
                'received_prefixes': -1,
                'accepted_prefixes': -1,
                'sent_prefixes': -1
            },
            'ipv6': {
                'received_prefixes': -1,
                'accepted_prefixes': -1,
                'sent_prefixes': -1
            }
        }
        if not neighbor['is_up']:
            return data
        elif isinstance(neighbor['tables'], list):
            if isinstance(neighbor['sent_prefixes'], int):
                # We expect sent_prefixes to be a list, but sometimes it
                # is of type int. Therefore convert attribute to list
                neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
            for idx, table in enumerate(neighbor['tables']):
                family = self._get_address_family(table)
                data[family] = {}
                data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
                data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
                # Junos only reports a sent count for tables that are 'in
                # sync'; pop(0) consumes the counts in table order, so each
                # in-sync table takes the next available value.
                if 'in sync' in neighbor['send-state'][idx]:
                    data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
                else:
                    data[family]['sent_prefixes'] = 0
        else:
            # Single table: the counters are scalars, not parallel lists.
            family = self._get_address_family(neighbor['tables'])
            data[family] = {}
            data[family]['received_prefixes'] = neighbor['received_prefixes']
            data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
            data[family]['sent_prefixes'] = neighbor['sent_prefixes']
        return data
@staticmethod
def _parse_value(value):
if isinstance(value, py23_compat.string_types):
return py23_compat.text_type(value)
elif value is None:
return u''
else:
return value
    def get_bgp_neighbors(self):
        """
        Return BGP neighbors details, grouped by routing instance.

        Instances without any peers are filtered out of the final result.
        """
        bgp_neighbor_data = {}
        default_neighbor_details = {
            'local_as': 0,
            'remote_as': 0,
            'remote_id': '',
            'is_up': False,
            'is_enabled': False,
            'description': '',
            'uptime': 0,
            'address_family': {}
        }
        keys = default_neighbor_details.keys()
        uptime_table = junos_views.junos_bgp_uptime_table(self.device)
        bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
        uptime_table_lookup = {}
        def _get_uptime_table(instance):
            # Memoise per-instance uptime lookups to avoid repeated RPCs.
            if instance not in uptime_table_lookup:
                uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
            return uptime_table_lookup[instance]
        def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
            '''
            Make sure to execute a simple request whenever using
            junos > 13. This is a helper used to avoid code redundancy
            and reuse the function also when iterating through the list
            BGP neighbors under a specific routing instance,
            also when the device is capable to return the routing
            instance name at the BGP neighbor level.
            '''
            for bgp_neighbor in neighbor_data:
                # Strip the '+<port>' suffix Junos appends to the peer address.
                peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
                neighbor_details = deepcopy(default_neighbor_details)
                neighbor_details.update(
                    {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
                )
                if not instance:
                    # not instance, means newer Junos version,
                    # as we request everything in a single request
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                    instance = peer_fwd_rti
                else:
                    # instance is explicitly requested,
                    # thus it's an old Junos, so we retrieve the BGP neighbors
                    # under a certain routing instance
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
                instance_name = 'global' if instance == 'master' else instance
                if instance_name not in bgp_neighbor_data:
                    bgp_neighbor_data[instance_name] = {}
                if 'router_id' not in bgp_neighbor_data[instance_name]:
                    # we only need to set this once
                    bgp_neighbor_data[instance_name]['router_id'] = \
                        py23_compat.text_type(neighbor_details.get('local_id', ''))
                peer = {
                    key: self._parse_value(value)
                    for key, value in neighbor_details.items()
                    if key in keys
                }
                peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
                peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
                peer['address_family'] = self._parse_route_stats(neighbor_details)
                if 'peers' not in bgp_neighbor_data[instance_name]:
                    bgp_neighbor_data[instance_name]['peers'] = {}
                bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
                if not uptime_table_items:
                    uptime_table_items = _get_uptime_table(instance)
                for neighbor, uptime in uptime_table_items:
                    if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                        bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                    bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]
        # Commenting out the following sections, till Junos
        # will provide a way to identify the routing instance name
        # from the details of the BGP neighbor
        # currently, there are Junos 15 version having a field called `peer_fwd_rti`
        # but unfortunately, this is not consistent.
        # Junos 17 might have this fixed, but this needs to be revisited later.
        # In the definition below, `old_junos` means a version that does not provide
        # the forwarding RTI information.
        #
        # old_junos = napalm_base.helpers.convert(
        #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
        # if old_junos:
        instances = junos_views.junos_route_instance_table(self.device).get()
        for instance, instance_data in instances.items():
            if instance.startswith('__'):
                # junos internal instances
                continue
            bgp_neighbor_data[instance] = {'peers': {}}
            instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
            uptime_table_items = uptime_table.get(instance=instance).items()
            _get_bgp_neighbors_core(instance_neighbors,
                                    instance=instance,
                                    uptime_table_items=uptime_table_items)
        # If the OS provides the `peer_fwd_rti` or any way to identify the
        # rotuing instance name (see above), the performances of this getter
        # can be significantly improved, as we won't execute one request
        # for each an every RT.
        # However, this improvement would only be beneficial for multi-VRF envs.
        #
        # else:
        #     instance_neighbors = bgp_neighbors_table.get().items()
        #     _get_bgp_neighbors_core(instance_neighbors)
        bgp_tmp_dict = {}
        for k, v in bgp_neighbor_data.items():
            if bgp_neighbor_data[k]['peers']:
                bgp_tmp_dict[k] = v
        return bgp_tmp_dict
def get_lldp_neighbors(self):
"""Return LLDP neighbors details."""
lldp = junos_views.junos_lldp_table(self.device)
try:
lldp.get()
except RpcError as rpcerr:
# this assumes the library runs in an environment
# able to handle logs
# otherwise, the user just won't see this happening
log.error('Unable to retrieve the LLDP neighbors information:')
log.error(rpcerr.message)
return {}
result = lldp.items()
neighbors = {}
for neigh in result:
if neigh[0] not in neighbors.keys():
neighbors[neigh[0]] = []
neighbors[neigh[0]].append({x[0]: py23_compat.text_type(x[1]) for x in neigh[1]})
return neighbors
def get_lldp_neighbors_detail(self, interface=''):
"""Detailed view of the LLDP neighbors."""
lldp_neighbors = {}
lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
try:
lldp_table.get()
except RpcError as rpcerr:
# this assumes the library runs in an environment
# able to handle logs
# otherwise, the user just won't see this happening
log.error('Unable to retrieve the LLDP neighbors information:')
log.error(rpcerr.message)
return {}
interfaces = lldp_table.get().keys()
# get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
# and SRX Series is get-lldp-interface-neighbors-information,
# and rpc for M, MX, and T Series is get-lldp-interface-neighbors
# ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
# ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
for interface in interfaces:
if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
lldp_table.get(interface_name=interface)
else:
lldp_table.get(interface_device=interface)
for item in lldp_table:
if interface not in lldp_neighbors.keys():
lldp_neighbors[interface] = []
lldp_neighbors[interface].append({
'parent_interface': item.parent_interface,
'remote_port': item.remote_port,
'remote_chassis_id': napalm_base.helpers.convert(
napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
'remote_port_description': napalm_base.helpers.convert(
py23_compat.text_type, item.remote_port_description),
'remote_system_name': item.remote_system_name,
'remote_system_description': item.remote_system_description,
'remote_system_capab': item.remote_system_capab,
'remote_system_enable_capab': item.remote_system_enable_capab
})
return lldp_neighbors
    def cli(self, commands):
        """
        Execute raw CLI commands and return their output.

        :param commands: list of CLI command strings; pipe modifiers
            (match/except/last/trim/count/find) are emulated locally because
            the NETCONF CLI does not pipe, and 'save' is filtered out.
        :returns: dict mapping each original command to its (post-processed)
            output text.
        :raises TypeError: when `commands` is not a list.
        """
        cli_output = {}
        def _count(txt, none): # Second arg for consistency only. noqa
            '''
            Return the exact output, as Junos displays
            e.g.:
            > show system processes extensive | match root | count
            Count: 113 lines
            '''
            count = len(txt.splitlines())
            return 'Count: {count} lines'.format(count=count)
        def _trim(txt, length):
            '''
            Trim specified number of columns from start of line.
            '''
            try:
                newlines = []
                for line in txt.splitlines():
                    newlines.append(line[int(length):])
                return '\n'.join(newlines)
            except ValueError:
                return txt
        def _except(txt, pattern):
            '''
            Show only text that does not match a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            unmatched = [
                line for line in txt.splitlines()
                if not re.search(rgx, line, re.I)
            ]
            return '\n'.join(unmatched)
        def _last(txt, length):
            '''
            Display end of output only.
            '''
            try:
                return '\n'.join(
                    txt.splitlines()[(-1)*int(length):]
                )
            except ValueError:
                return txt
        def _match(txt, pattern):
            '''
            Show only text that matches a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            matched = [
                line for line in txt.splitlines()
                if re.search(rgx, line, re.I)
            ]
            return '\n'.join(matched)
        def _find(txt, pattern):
            '''
            Search for first occurrence of pattern.
            '''
            rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
            match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
            if match:
                return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
            else:
                return '\nPattern not found'
        def _process_pipe(cmd, txt):
            '''
            Process CLI output from Juniper device that
            doesn't allow piping the output.
            '''
            if txt is not None:
                return txt
            _OF_MAP = OrderedDict()
            _OF_MAP['except'] = _except
            _OF_MAP['match'] = _match
            _OF_MAP['last'] = _last
            _OF_MAP['trim'] = _trim
            _OF_MAP['count'] = _count
            _OF_MAP['find'] = _find
            # the operations order matter in this case!
            exploded_cmd = cmd.split('|')
            pipe_oper_args = {}
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0] # always there
                pipe_args = ''.join(exploded_pipe[1:2])
                # will not throw error when there's no arg
                pipe_oper_args[pipe_oper] = pipe_args
            for oper in _OF_MAP.keys():
                # to make sure the operation sequence is correct
                if oper not in pipe_oper_args.keys():
                    continue
                txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
            return txt
        if not isinstance(commands, list):
            raise TypeError('Please enter a valid list of commands!')
        _PIPE_BLACKLIST = ['save']
        # Preprocessing to avoid forbidden commands
        for command in commands:
            exploded_cmd = command.split('|')
            command_safe_parts = []
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0] # always there
                if pipe_oper in _PIPE_BLACKLIST:
                    continue
                pipe_args = ''.join(exploded_pipe[1:2])
                safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                                 args=pipe_args)
                command_safe_parts.append(safe_pipe)
            safe_command = exploded_cmd[0] if not command_safe_parts else\
                '{base} | {pipes}'.format(base=exploded_cmd[0],
                                          pipes=' | '.join(command_safe_parts))
            raw_txt = self.device.cli(safe_command, warning=False)
            cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
                _process_pipe(command, raw_txt))
        return cli_output
    def get_bgp_config(self, group='', neighbor=''):
        """
        Return BGP configuration, keyed by BGP group name.

        :param group: when set, only that group is retrieved.
        :param neighbor: when set (requires `group`), only that neighbor is
            kept inside the group; ignored when no group is given.
        """
        def update_dict(d, u): # for deep dictionary update
            # NOTE(review): `collections.Mapping` was removed in Python 3.10;
            # `collections.abc.Mapping` is the long-term spelling.
            for k, v in u.items():
                if isinstance(d, collections.Mapping):
                    if isinstance(v, collections.Mapping):
                        r = update_dict(d.get(k, {}), v)
                        d[k] = r
                    else:
                        d[k] = u[k]
                else:
                    d = {k: u[k]}
            return d
        def build_prefix_limit(**args):
            """
            Transform the elements of a dictionary into nested dictionaries.
            Example:
                {
                    'inet_unicast_limit': 500,
                    'inet_unicast_teardown_threshold': 95,
                    'inet_unicast_teardown_timeout': 5
                }
            becomes:
                {
                    'inet': {
                        'unicast': {
                            'limit': 500,
                            'teardown': {
                                'threshold': 95,
                                'timeout': 5
                            }
                        }
                    }
                }
            """
            prefix_limit = {}
            for key, value in args.items():
                key_levels = key.split('_')
                length = len(key_levels)-1
                temp_dict = {
                    key_levels[length]: value
                }
                for index in reversed(range(length)):
                    level = key_levels[index]
                    temp_dict = {level: temp_dict}
                update_dict(prefix_limit, temp_dict)
            return prefix_limit
        _COMMON_FIELDS_DATATYPE_ = {
            'description': py23_compat.text_type,
            'local_address': py23_compat.text_type,
            'local_as': int,
            'remote_as': int,
            'import_policy': py23_compat.text_type,
            'export_policy': py23_compat.text_type,
            'inet_unicast_limit_prefix_limit': int,
            'inet_unicast_teardown_threshold_prefix_limit': int,
            'inet_unicast_teardown_timeout_prefix_limit': int,
            'inet_unicast_novalidate_prefix_limit': int,
            'inet_flow_limit_prefix_limit': int,
            'inet_flow_teardown_threshold_prefix_limit': int,
            'inet_flow_teardown_timeout_prefix_limit': int,
            'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
            'inet6_unicast_limit_prefix_limit': int,
            'inet6_unicast_teardown_threshold_prefix_limit': int,
            'inet6_unicast_teardown_timeout_prefix_limit': int,
            'inet6_unicast_novalidate_prefix_limit': int,
            'inet6_flow_limit_prefix_limit': int,
            'inet6_flow_teardown_threshold_prefix_limit': int,
            'inet6_flow_teardown_timeout_prefix_limit': int,
            'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
        }
        _PEER_FIELDS_DATATYPE_MAP_ = {
            'authentication_key': py23_compat.text_type,
            'route_reflector_client': bool,
            'nhs': bool
        }
        _PEER_FIELDS_DATATYPE_MAP_.update(
            _COMMON_FIELDS_DATATYPE_
        )
        _GROUP_FIELDS_DATATYPE_MAP_ = {
            'type': py23_compat.text_type,
            'apply_groups': list,
            'remove_private_as': bool,
            'multipath': bool,
            'multihop_ttl': int
        }
        _GROUP_FIELDS_DATATYPE_MAP_.update(
            _COMMON_FIELDS_DATATYPE_
        )
        _DATATYPE_DEFAULT_ = {
            py23_compat.text_type: '',
            int: 0,
            bool: False,
            list: []
        }
        bgp_config = {}
        if group:
            bgp = junos_views.junos_bgp_config_group_table(self.device)
            bgp.get(group=group)
        else:
            bgp = junos_views.junos_bgp_config_table(self.device)
            bgp.get()
            neighbor = '' # if no group is set, no neighbor should be set either
        bgp_items = bgp.items()
        if neighbor:
            neighbor_ip = napalm_base.helpers.ip(neighbor)
        for bgp_group in bgp_items:
            bgp_group_name = bgp_group[0]
            bgp_group_details = bgp_group[1]
            # Seed the group with type-appropriate defaults (prefix-limit
            # fields are handled separately below).
            bgp_config[bgp_group_name] = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                if key == 'neighbors':
                    bgp_group_peers = value
                    continue
                bgp_config[bgp_group_name].update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            if 'multihop' in bgp_config[bgp_group_name].keys():
                # Delete 'multihop' key from the output
                del bgp_config[bgp_group_name]['multihop']
                if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                    # Set ttl to default value 64
                    bgp_config[bgp_group_name]['multihop_ttl'] = 64
            bgp_config[bgp_group_name]['neighbors'] = {}
            for bgp_group_neighbor in bgp_group_peers.items():
                bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
                if neighbor and bgp_peer_address != neighbor:
                    continue # if filters applied, jump over all other neighbors
                bgp_group_details = bgp_group_neighbor[1]
                bgp_peer_details = {
                    field: _DATATYPE_DEFAULT_.get(datatype)
                    for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                    if '_prefix_limit' not in field
                }
                for elem in bgp_group_details:
                    if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                        continue
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    key = elem[0]
                    value = elem[1]
                    if key in ['export_policy', 'import_policy']:
                        if isinstance(value, list):
                            value = ' '.join(value)
                    if key == 'local_address':
                        value = napalm_base.helpers.convert(
                            napalm_base.helpers.ip, value, value)
                    bgp_peer_details.update({
                        key: napalm_base.helpers.convert(datatype, value, default)
                    })
                bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['local_as'])
                bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['remote_as'])
                # NOTE(review): `key` here is the loop variable leaking out of
                # the `for elem ...` loop above, i.e. only the LAST field is
                # checked against 'cluster' -- confirm this is intentional.
                if key == 'cluster':
                    bgp_peer_details['route_reflector_client'] = True
                    # we do not want cluster in the output
                    del bgp_peer_details['cluster']
                if 'cluster' in bgp_config[bgp_group_name].keys():
                    bgp_peer_details['route_reflector_client'] = True
                prefix_limit_fields = {}
                for elem in bgp_group_details:
                    if '_prefix_limit' in elem[0] and elem[1] is not None:
                        datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                        default = _DATATYPE_DEFAULT_.get(datatype)
                        prefix_limit_fields.update({
                            elem[0].replace('_prefix_limit', ''):
                                napalm_base.helpers.convert(datatype, elem[1], default)
                        })
                bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
                bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
                if neighbor and bgp_peer_address == neighbor_ip:
                    break # found the desired neighbor
            if 'cluster' in bgp_config[bgp_group_name].keys():
                # we do not want cluster in the output
                del bgp_config[bgp_group_name]['cluster']
        return bgp_config
    def get_bgp_neighbors_detail(self, neighbor_address=''):
        """Detailed view of the BGP neighbors operational data.

        Returns a dict keyed by routing-instance name ('global' for master),
        then by remote AS number, each value being a list of per-neighbor
        detail dicts. ``neighbor_address`` optionally restricts the lookup
        to a single peer.
        """
        bgp_neighbors = {}
        # Template for each neighbor's output dict; fields missing from the
        # device reply keep these defaults.
        default_neighbor_details = {
            'up': False,
            'local_as': 0,
            'remote_as': 0,
            'router_id': u'',
            'local_address': u'',
            'routing_table': u'',
            'local_address_configured': False,
            'local_port': 0,
            'remote_address': u'',
            'remote_port': 0,
            'multihop': False,
            'multipath': False,
            'remove_private_as': False,
            'import_policy': u'',
            'export_policy': u'',
            'input_messages': -1,
            'output_messages': -1,
            'input_updates': -1,
            'output_updates': -1,
            'messages_queued_out': -1,
            'connection_state': u'',
            'previous_connection_state': u'',
            'last_event': u'',
            'suppress_4byte_as': False,
            'local_as_prepend': False,
            'holdtime': 0,
            'configured_holdtime': 0,
            'keepalive': 0,
            'configured_keepalive': 0,
            'active_prefix_count': -1,
            'received_prefix_count': -1,
            'accepted_prefix_count': -1,
            'suppressed_prefix_count': -1,
            'advertised_prefix_count': -1,
            'flap_count': 0
        }
        # JUNOS reports boolean peer options as a space-separated keyword
        # string; map the keywords NAPALM exposes onto output field names.
        OPTION_KEY_MAP = {
            'RemovePrivateAS': 'remove_private_as',
            'Multipath': 'multipath',
            'Multihop': 'multihop',
            'AddressFamily': 'local_address_configured'
            # 'AuthKey' : 'authentication_key_set'
            # but other vendors do not specify if auth key is set
            # other options:
            # Preference, HoldTime, Ttl, LogUpDown, Refresh
        }

        def _bgp_iter_core(neighbor_data, instance=None):
            '''
            Iterate over a list of neighbors.
            For older junos, the routing instance is not specified inside the
            BGP neighbors XML, therefore we need to use a super sub-optimal structure
            as in get_bgp_neighbors: iterate through the list of network instances
            then execute one request for each and every routing instance.
            For newer junos, this is not necessary as the routing instance is available
            and we can get everything solve in a single request.
            '''
            for bgp_neighbor in neighbor_data:
                remote_as = int(bgp_neighbor[0])
                # deepcopy so each neighbor gets an independent dict
                neighbor_details = deepcopy(default_neighbor_details)
                neighbor_details.update(
                    {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
                )
                if not instance:
                    # newer junos: routing instance comes with the neighbor data
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                    instance = peer_fwd_rti
                else:
                    # older junos: instance was passed in explicitly
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
                instance_name = 'global' if instance == 'master' else instance
                options = neighbor_details.pop('options', '')
                if isinstance(options, str):
                    options_list = options.split()
                    for option in options_list:
                        key = OPTION_KEY_MAP.get(option)
                        if key is not None:
                            neighbor_details[key] = True
                four_byte_as = neighbor_details.pop('4byte_as', 0)
                # addresses come as 'ip+port'; split into the two fields
                local_address = neighbor_details.pop('local_address', '')
                local_details = local_address.split('+')
                neighbor_details['local_address'] = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, local_details[0], local_details[0])
                if len(local_details) == 2:
                    neighbor_details['local_port'] = int(local_details[1])
                else:
                    neighbor_details['local_port'] = 179
                neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
                peer_address = neighbor_details.pop('peer_address', '')
                remote_details = peer_address.split('+')
                neighbor_details['remote_address'] = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, remote_details[0], remote_details[0])
                if len(remote_details) == 2:
                    neighbor_details['remote_port'] = int(remote_details[1])
                else:
                    neighbor_details['remote_port'] = 179
                neighbor_details['routing_table'] = instance_name
                neighbor_details['local_as'] = napalm_base.helpers.as_number(
                    neighbor_details['local_as'])
                neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                    neighbor_details['remote_as'])
                neighbors_rib = neighbor_details.pop('rib')
                neighbors_queue = neighbor_details.pop('queue')
                # sum queued-out messages across all queues of this peer
                messages_queued_out = 0
                for queue_entry in neighbors_queue.items():
                    messages_queued_out += queue_entry[1][0][1]
                neighbor_details['messages_queued_out'] = messages_queued_out
                if instance_name not in bgp_neighbors.keys():
                    bgp_neighbors[instance_name] = {}
                if remote_as not in bgp_neighbors[instance_name].keys():
                    bgp_neighbors[instance_name][remote_as] = []
                neighbor_rib_stats = neighbors_rib.items()
                if not neighbor_rib_stats:
                    bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                    continue  # no RIBs available, pass default details
                # aggregate prefix counters across every RIB of the peer
                neighbor_rib_details = {
                    'active_prefix_count': 0,
                    'received_prefix_count': 0,
                    'accepted_prefix_count': 0,
                    'suppressed_prefix_count': 0,
                    'advertised_prefix_count': 0
                }
                for rib_entry in neighbor_rib_stats:
                    for elem in rib_entry[1]:
                        if elem[1] is None:
                            neighbor_rib_details[elem[0]] += 0
                        else:
                            neighbor_rib_details[elem[0]] += elem[1]
                neighbor_details.update(neighbor_rib_details)
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)

        # old_junos = napalm_base.helpers.convert(
        #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
        bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
        # if old_junos:
        # query once per routing instance (see _bgp_iter_core docstring)
        instances = junos_views.junos_route_instance_table(self.device)
        for instance, instance_data in instances.get().items():
            if instance.startswith('__'):
                # junos internal instances
                continue
            neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                    neighbor_address=str(neighbor_address)).items()
            _bgp_iter_core(neighbor_data, instance=instance)
        # else:
        #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
        #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
        #     _bgp_iter_core(neighbor_data)
        return bgp_neighbors
def get_arp_table(self):
"""Return the ARP table."""
# could use ArpTable
# from jnpr.junos.op.phyport import ArpTable
# and simply use it
# but
# we need:
# - filters
# - group by VLAN ID
# - hostname & TTE fields as well
arp_table = []
arp_table_raw = junos_views.junos_arp_table(self.device)
arp_table_raw.get()
arp_table_items = arp_table_raw.items()
for arp_table_entry in arp_table_items:
arp_entry = {
elem[0]: elem[1] for elem in arp_table_entry[1]
}
arp_entry['mac'] = napalm_base.helpers.mac(arp_entry.get('mac'))
arp_entry['ip'] = napalm_base.helpers.ip(arp_entry.get('ip'))
arp_table.append(arp_entry)
return arp_table
def get_ntp_peers(self):
"""Return the NTP peers configured on the device."""
ntp_table = junos_views.junos_ntp_peers_config_table(self.device)
ntp_table.get()
ntp_peers = ntp_table.items()
if not ntp_peers:
return {}
return {napalm_base.helpers.ip(peer[0]): {} for peer in ntp_peers}
def get_ntp_servers(self):
"""Return the NTP servers configured on the device."""
ntp_table = junos_views.junos_ntp_servers_config_table(self.device)
ntp_table.get()
ntp_servers = ntp_table.items()
if not ntp_servers:
return {}
return {napalm_base.helpers.ip(server[0]): {} for server in ntp_servers}
    def get_interfaces_ip(self):
        """Return the configured IP addresses, grouped by interface and family.

        Structure: {interface: {'ipv4'|'ipv6': {address: {'prefix_length': int}}}}
        """
        interfaces_ip = {}
        interface_table = junos_views.junos_ip_interfaces_table(self.device)
        interface_table.get()
        interface_table_items = interface_table.items()
        # Map JUNOS address-family names onto the NAPALM vocabulary;
        # families not listed here (iso, mpls, ...) are skipped below.
        _FAMILY_VMAP_ = {
            'inet': u'ipv4',
            'inet6': u'ipv6'
            # can add more mappings
        }
        # Fallback prefix length when the address carries no explicit mask.
        _FAMILY_MAX_PREFIXLEN = {
            'inet': 32,
            'inet6': 128
        }
        for interface_details in interface_table_items:
            # table key is the address, possibly with a mask: '10.0.0.1/24'
            ip_network = interface_details[0]
            ip_address = ip_network.split('/')[0]
            address = napalm_base.helpers.convert(
                napalm_base.helpers.ip, ip_address, ip_address)
            try:
                interface_details_dict = dict(interface_details[1])
                family_raw = interface_details_dict.get('family')
                interface = py23_compat.text_type(interface_details_dict.get('interface'))
            except ValueError:
                # malformed table entry; skip it
                continue
            prefix = napalm_base.helpers.convert(int,
                                                 ip_network.split('/')[-1],
                                                 _FAMILY_MAX_PREFIXLEN.get(family_raw))
            family = _FAMILY_VMAP_.get(family_raw)
            if not family or not interface:
                continue
            if interface not in interfaces_ip.keys():
                interfaces_ip[interface] = {}
            if family not in interfaces_ip[interface].keys():
                interfaces_ip[interface][family] = {}
            if address not in interfaces_ip[interface][family].keys():
                interfaces_ip[interface][family][address] = {}
            interfaces_ip[interface][family][address][u'prefix_length'] = prefix
        return interfaces_ip
def get_mac_address_table(self):
"""Return the MAC address table."""
mac_address_table = []
if self.device.facts.get('personality', '') in ['SWITCH']: # for EX & QFX devices
if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']: # for L2NG devices
mac_table = junos_views.junos_mac_address_table_switch_l2ng(self.device)
else:
mac_table = junos_views.junos_mac_address_table_switch(self.device)
else:
mac_table = junos_views.junos_mac_address_table(self.device)
mac_table.get()
mac_table_items = mac_table.items()
default_values = {
'mac': u'',
'interface': u'',
'vlan': 0,
'static': False,
'active': True,
'moves': 0,
'last_move': 0.0
}
for mac_table_entry in mac_table_items:
mac_entry = default_values.copy()
mac_entry.update(
{elem[0]: elem[1] for elem in mac_table_entry[1]}
)
mac = mac_entry.get('mac')
# JUNOS returns '*' for Type = Flood
if mac == '*':
continue
mac_entry['mac'] = napalm_base.helpers.mac(mac)
mac_address_table.append(mac_entry)
return mac_address_table
    def get_route_to(self, destination='', protocol=''):
        """Return route details to a specific destination, learned from a certain protocol.

        Returns {destination/prefix: [route_dict, ...]}. Raises TypeError for a
        non-string destination, CommandTimeoutException when the query is too
        broad, and CommandErrorException for other RPC failures.
        """
        routes = {}
        if not isinstance(destination, py23_compat.string_types):
            raise TypeError('Please specify a valid destination!')
        if protocol and isinstance(destination, py23_compat.string_types):
            protocol = protocol.lower()
        if protocol == 'connected':
            protocol = 'direct'  # this is how is called on JunOS
        _COMMON_PROTOCOL_FIELDS_ = [
            'destination',
            'prefix_length',
            'protocol',
            'current_active',
            'last_active',
            'age',
            'next_hop',
            'outgoing_interface',
            'selected_next_hop',
            'preference',
            'inactive_reason',
            'routing_table'
        ]  # identifies the list of fields common for all protocols
        _BOOLEAN_FIELDS_ = [
            'current_active',
            'selected_next_hop',
            'last_active'
        ]  # fields expected to have boolean values
        # per-protocol fields exported under 'protocol_attributes'
        _PROTOCOL_SPECIFIC_FIELDS_ = {
            'bgp': [
                'local_as',
                'remote_as',
                'as_path',
                'communities',
                'local_preference',
                'preference2',
                'remote_address',
                'metric',
                'metric2'
            ],
            'isis': [
                'level',
                'metric',
                'local_as'
            ]
        }
        routes_table = junos_views.junos_protocol_route_table(self.device)
        rt_kargs = {
            'destination': destination
        }
        if protocol and isinstance(destination, py23_compat.string_types):
            rt_kargs['protocol'] = protocol
        try:
            routes_table.get(**rt_kargs)
        except RpcTimeoutError:
            # on devices with milions of routes
            # in case the destination is too generic (e.g.: 10/8)
            # will take very very long to determine all routes and
            # moreover will return a huge list
            raise CommandTimeoutException(
                'Too many routes returned! Please try with a longer prefix or a specific protocol!'
            )
        except RpcError as rpce:
            if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
                raise CommandErrorException(
                    'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
            raise CommandErrorException(rpce)
        except Exception as err:
            raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))
        routes_items = routes_table.items()
        for route in routes_items:
            d = {}
            # next_hop = route[0]
            d = {elem[0]: elem[1] for elem in route[1]}
            destination = napalm_base.helpers.ip(d.pop('destination', ''))
            prefix_length = d.pop('prefix_length', 32)
            destination = '{d}/{p}'.format(
                d=destination,
                p=prefix_length
            )
            # missing boolean fields default to False
            d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
            as_path = d.get('as_path')
            if as_path is not None:
                # strip the 'AS path:' prefix and trailing origin markers
                d['as_path'] = as_path.split(' I ')[0]\
                    .replace('AS path:', '')\
                    .replace('I', '')\
                    .strip()
                # to be sure that contains only AS Numbers
            if d.get('inactive_reason') is None:
                d['inactive_reason'] = u''
            route_protocol = d.get('protocol').lower()
            if protocol and protocol != route_protocol:
                continue
            communities = d.get('communities')
            if communities is not None and type(communities) is not list:
                d['communities'] = [communities]
            d_keys = list(d.keys())
            # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
            all_protocol_attributes = {
                key: d.pop(key)
                for key in d_keys
                if key not in _COMMON_PROTOCOL_FIELDS_
            }
            protocol_attributes = {
                key: value for key, value in all_protocol_attributes.items()
                if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
            }
            d['protocol_attributes'] = protocol_attributes
            if destination not in routes.keys():
                routes[destination] = []
            routes[destination].append(d)
        return routes
    def get_snmp_information(self):
        """Return the SNMP configuration.

        Top-level keys come from the device's SNMP config view; the
        'community' key maps community names to their details (incl. 'acl'
        and the NAPALM authorization 'mode').
        """
        snmp_information = {}
        snmp_config = junos_views.junos_snmp_config_table(self.device)
        snmp_config.get()
        snmp_items = snmp_config.items()
        if not snmp_items:
            # SNMP not configured at all
            return snmp_information
        snmp_information = {
            py23_compat.text_type(ele[0]): ele[1] if ele[1] else ''
            for ele in snmp_items[0][1]
        }
        snmp_information['community'] = {}
        communities_table = snmp_information.pop('communities_table')
        if not communities_table:
            return snmp_information
        for community in communities_table.items():
            community_name = py23_compat.text_type(community[0])
            community_details = {
                'acl': ''
            }
            # translate the JUNOS authorization mode into NAPALM vocabulary
            community_details.update({
                py23_compat.text_type(ele[0]): py23_compat.text_type(
                    ele[1] if ele[0] != 'mode'
                    else C.SNMP_AUTHORIZATION_MODE_MAP.get(ele[1]))
                for ele in community[1]
            })
            snmp_information['community'][community_name] = community_details
        return snmp_information
    def get_probes_config(self):
        """Return the configuration of the RPM probes.

        Structure: {probe_name: {test_name: {probe_type, target, source,
        probe_count, test_interval}}}.
        """
        probes = {}
        probes_table = junos_views.junos_rpm_probes_config_table(self.device)
        probes_table.get()
        probes_table_items = probes_table.items()
        for probe_test in probes_table_items:
            test_name = py23_compat.text_type(probe_test[0])
            test_details = {
                p[0]: p[1] for p in probe_test[1]
            }
            # normalise each field to the expected type, with safe defaults
            probe_name = napalm_base.helpers.convert(
                py23_compat.text_type, test_details.pop('probe_name'))
            target = napalm_base.helpers.convert(
                py23_compat.text_type, test_details.pop('target', ''))
            test_interval = napalm_base.helpers.convert(int, test_details.pop('test_interval', '0'))
            probe_count = napalm_base.helpers.convert(int, test_details.pop('probe_count', '0'))
            probe_type = napalm_base.helpers.convert(
                py23_compat.text_type, test_details.pop('probe_type', ''))
            source = napalm_base.helpers.convert(
                py23_compat.text_type, test_details.pop('source_address', ''))
            if probe_name not in probes.keys():
                probes[probe_name] = {}
            probes[probe_name][test_name] = {
                'probe_type': probe_type,
                'target': target,
                'source': source,
                'probe_count': probe_count,
                'test_interval': test_interval
            }
        return probes
    def get_probes_results(self):
        """Return the results of the RPM probes.

        Structure: {probe_name: {test_name: {result fields}}}. Float timing
        values are converted from microseconds to milliseconds.
        """
        probes_results = {}
        probes_results_table = junos_views.junos_rpm_probes_results_table(self.device)
        probes_results_table.get()
        probes_results_items = probes_results_table.items()
        for probe_result in probes_results_items:
            probe_name = py23_compat.text_type(probe_result[0])
            test_results = {
                p[0]: p[1] for p in probe_result[1]
            }
            test_results['last_test_loss'] = napalm_base.helpers.convert(
                int, test_results.pop('last_test_loss'), 0)
            # only values are rewritten here, so iterating items() is safe
            for test_param_name, test_param_value in test_results.items():
                if isinstance(test_param_value, float):
                    test_results[test_param_name] = test_param_value * 1e-3
                    # convert from useconds to mseconds
            test_name = test_results.pop('test_name', '')
            source = test_results.get('source', u'')
            if source is None:
                test_results['source'] = u''
            if probe_name not in probes_results.keys():
                probes_results[probe_name] = {}
            probes_results[probe_name][test_name] = test_results
        return probes_results
    def traceroute(self,
                   destination,
                   source=C.TRACEROUTE_SOURCE,
                   ttl=C.TRACEROUTE_TTL,
                   timeout=C.TRACEROUTE_TIMEOUT,
                   vrf=C.TRACEROUTE_VRF):
        """Execute traceroute and return results.

        On success returns {'success': {ttl: {'probes': {...}}}}; on failure
        returns {'error': <message>}. RTT values are converted to ms.
        """
        traceroute_result = {}
        # calling form RPC does not work properly :(
        # but defined junos_route_instance_table just in case
        # build the optional CLI arguments only when supplied
        source_str = ''
        maxttl_str = ''
        wait_str = ''
        vrf_str = ''
        if source:
            source_str = ' source {source}'.format(source=source)
        if ttl:
            maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
        if timeout:
            wait_str = ' wait {timeout}'.format(timeout=timeout)
        if vrf:
            vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
        traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
            destination=destination,
            source=source_str,
            maxttl=maxttl_str,
            wait=wait_str,
            vrf=vrf_str
        )
        traceroute_rpc = E('command', traceroute_command)
        rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
        # make direct RPC call via NETCONF
        traceroute_results = rpc_reply.find('.//traceroute-results')
        traceroute_failure = napalm_base.helpers.find_txt(
            traceroute_results, 'traceroute-failure', '')
        error_message = napalm_base.helpers.find_txt(
            traceroute_results, 'rpc-error/error-message', '')
        if traceroute_failure and error_message:
            return {'error': '{}: {}'.format(traceroute_failure, error_message)}
        traceroute_result['success'] = {}
        for hop in traceroute_results.findall('hop'):
            ttl_value = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
            if ttl_value not in traceroute_result['success']:
                traceroute_result['success'][ttl_value] = {'probes': {}}
            for probe in hop.findall('probe-result'):
                probe_index = napalm_base.helpers.convert(
                    int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
                ip_address = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
                host_name = py23_compat.text_type(
                    napalm_base.helpers.find_txt(probe, 'host-name', '*'))
                rtt = napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3  # ms
                traceroute_result['success'][ttl_value]['probes'][probe_index] = {
                    'ip_address': ip_address,
                    'host_name': host_name,
                    'rtt': rtt
                }
        return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
ping_dict = {}
source_str = ''
maxttl_str = ''
timeout_str = ''
size_str = ''
count_str = ''
vrf_str = ''
if source:
source_str = ' source {source}'.format(source=source)
if ttl:
maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
if timeout:
timeout_str = ' wait {timeout}'.format(timeout=timeout)
if size:
size_str = ' size {size}'.format(size=size)
if count:
count_str = ' count {count}'.format(count=count)
if vrf:
vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
destination=destination,
source=source_str,
ttl=maxttl_str,
timeout=timeout_str,
size=size_str,
count=count_str,
vrf=vrf_str
)
ping_rpc = E('command', ping_command)
rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
# make direct RPC call via NETCONF
probe_summary = rpc_reply.find('.//probe-results-summary')
if probe_summary is None:
rpc_error = rpc_reply.find('.//rpc-error')
return {'error': '{}'.format(
napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
packet_loss = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
# rtt values are valid only if a we get an ICMP reply
if packet_loss is not 100:
ping_dict['success'] = {}
ping_dict['success']['probes_sent'] = int(
probe_summary.findtext("probes-sent"))
ping_dict['success']['packet_loss'] = packet_loss
ping_dict['success'].update({
'rtt_min': round((napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
'rtt_max': round((napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
'rtt_avg': round((napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_summary, 'rtt-average'), -1) * 1e-3), 3),
'rtt_stddev': round((napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
})
tmp = rpc_reply.find('.//ping-results')
results_array = []
for probe_result in tmp.findall('probe-result'):
ip_address = napalm_base.helpers.convert(
napalm_base.helpers.ip,
napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
rtt = round(
(napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_result, 'rtt'), -1) * 1e-3), 3)
results_array.append({'ip_address': ip_address,
'rtt': rtt})
ping_dict['success'].update({'results': results_array})
else:
return {'error': 'Packet loss {}'.format(packet_loss)}
return ping_dict
    def get_users(self):
        """Return the configuration of the users.

        Maps each username to {'level', 'password', 'sshkeys'}; the Cisco-style
        privilege level is derived from the JUNOS login class.
        """
        users = {}
        # translate JUNOS login classes into Cisco-style privilege levels
        _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = {
            'super-user': 15,
            'superuser': 15,
            'operator': 5,
            'read-only': 1,
            'unauthorized': 0
        }
        _DEFAULT_USER_DETAILS = {
            'level': 0,
            'password': '',
            'sshkeys': []
        }
        users_table = junos_views.junos_users_table(self.device)
        users_table.get()
        users_items = users_table.items()
        for user_entry in users_items:
            username = user_entry[0]
            user_details = _DEFAULT_USER_DETAILS.copy()
            user_details.update({
                d[0]: d[1] for d in user_entry[1] if d[1]
            })
            user_class = user_details.pop('class', '')
            user_details = {
                key: py23_compat.text_type(user_details[key])
                for key in user_details.keys()
            }
            # unknown classes fall back to privilege level 0
            level = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0)
            user_details.update({
                'level': level
            })
            # gather whichever SSH key types are configured into one list
            user_details['sshkeys'] = [
                user_details.pop(key)
                for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
                if user_details.get(key, '')
            ]
            users[username] = user_details
        return users
    def get_optics(self):
        """Return optics information.

        Structure: {interface: {'physical_channels': {'channel': [per-lane
        dicts with input/output power and laser bias current]}}}.
        """
        optics_table = junos_views.junos_intf_optics_table(self.device)
        optics_table.get()
        optics_items = optics_table.items()
        # optics_items has no lane information, so we need to re-format data
        # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
        # but the information for 40G/100G is incorrect at this point
        # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
        optics_items_with_lane = []
        for intf_optic_item in optics_items:
            temp_list = list(intf_optic_item)
            temp_list.insert(1, u"0")
            new_intf_optic_item = tuple(temp_list)
            optics_items_with_lane.append(new_intf_optic_item)
        # Now optics_items_with_lane has all optics with lane 0 included
        # Example: ('xe-0/0/0', u'0', [ optical_values ])
        # Get optical information for 40G/100G optics
        optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
        optics_table40G.get()
        optics_40Gitems = optics_table40G.items()
        # Re-format data as before inserting lane value
        new_optics_40Gitems = []
        for item in optics_40Gitems:
            lane = item[0]
            iface = item[1].pop(0)
            new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
        # New_optics_40Gitems contains 40G/100G optics only:
        # ('et-0/0/49', u'0', [ optical_values ]),
        # ('et-0/0/49', u'1', [ optical_values ]),
        # ('et-0/0/49', u'2', [ optical_values ])
        # Remove 40G/100G optics entries with wrong information returned
        # from junos_intf_optics_table()
        iface_40G = [item[0] for item in new_optics_40Gitems]
        for intf_optic_item in optics_items_with_lane:
            iface_name = intf_optic_item[0]
            if iface_name not in iface_40G:
                new_optics_40Gitems.append(intf_optic_item)
        # New_optics_40Gitems contains all optics 10G/40G/100G with the lane
        optics_detail = {}
        for intf_optic_item in new_optics_40Gitems:
            lane = intf_optic_item[1]
            interface_name = py23_compat.text_type(intf_optic_item[0])
            optics = dict(intf_optic_item[2])
            if interface_name not in optics_detail:
                optics_detail[interface_name] = {}
                optics_detail[interface_name]['physical_channels'] = {}
                optics_detail[interface_name]['physical_channels']['channel'] = []
            # Defaulting avg, min, max values to 0.0 since device does not
            # return these values
            intf_optics = {
                'index': int(lane),
                'state': {
                    'input_power': {
                        'instant': (
                            float(optics['input_power'])
                            if optics['input_power'] not in
                                [None, C.OPTICS_NULL_LEVEL]
                            else 0.0),
                        'avg': 0.0,
                        'max': 0.0,
                        'min': 0.0
                    },
                    'output_power': {
                        'instant': (
                            float(optics['output_power'])
                            if optics['output_power'] not in
                                [None, C.OPTICS_NULL_LEVEL]
                            else 0.0),
                        'avg': 0.0,
                        'max': 0.0,
                        'min': 0.0
                    },
                    'laser_bias_current': {
                        'instant': (
                            float(optics['laser_bias_current'])
                            if optics['laser_bias_current'] not in
                                [None, C.OPTICS_NULL_LEVEL]
                            else 0.0),
                        'avg': 0.0,
                        'max': 0.0,
                        'min': 0.0
                    }
                }
            }
            optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
        return optics_detail
    def get_config(self, retrieve='all'):
        """Return the device configuration in text format.

        ``retrieve`` may be 'running', 'candidate', 'startup' or 'all'.
        The 'startup' entry is never populated by this driver and is always
        returned as an empty string.
        """
        rv = {
            'startup': '',
            'running': '',
            'candidate': ''
        }

        options = {
            'format': 'text',
            'database': 'candidate'
        }

        if retrieve in ('candidate', 'all'):
            config = self.device.rpc.get_config(filter_xml=None, options=options)
            rv['candidate'] = py23_compat.text_type(config.text)
        if retrieve in ('running', 'all'):
            # re-use the same options, switching to the committed database
            options['database'] = 'committed'
            config = self.device.rpc.get_config(filter_xml=None, options=options)
            rv['running'] = py23_compat.text_type(config.text)
        return rv
    def get_network_instances(self, name=''):
        """Return the network instances (routing instances / VRFs).

        Interfaces not bound to any instance are grouped under a synthetic
        'default' instance. An optional ``name`` filters the result to one
        instance ({} when it does not exist).
        """
        network_instances = {}
        ri_table = junos_views.junos_nw_instances_table(self.device)
        ri_table.get()
        ri_entries = ri_table.items()
        vrf_interfaces = []
        for ri_entry in ri_entries:
            ri_name = py23_compat.text_type(ri_entry[0])
            ri_details = {
                d[0]: d[1] for d in ri_entry[1]
            }
            ri_type = ri_details['instance_type']
            if ri_type is None:
                ri_type = 'default'
            ri_rd = ri_details['route_distinguisher']
            ri_interfaces = ri_details['interfaces']
            # a single interface comes back as a scalar; normalise to a list
            if not isinstance(ri_interfaces, list):
                ri_interfaces = [ri_interfaces]
            network_instances[ri_name] = {
                'name': ri_name,
                'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
                'state': {
                    'route_distinguisher': ri_rd if ri_rd else ''
                },
                'interfaces': {
                    'interface': {
                        intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                    }
                }
            }
            vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
        # everything not claimed by a VRF belongs to the default instance
        all_interfaces = self.get_interfaces().keys()
        default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
        if 'default' not in network_instances:
            network_instances['default'] = {
                'name': 'default',
                'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
                'state': {
                    'route_distinguisher': ''
                },
                'interfaces': {
                    'interface': {
                        py23_compat.text_type(intrf_name): {}
                        for intrf_name in default_interfaces
                    }
                }
            }
        if not name:
            return network_instances
        if name not in network_instances:
            return {}
        return {name: network_instances[name]}
|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver.get_mac_address_table | python | def get_mac_address_table(self):
mac_address_table = []
if self.device.facts.get('personality', '') in ['SWITCH']: # for EX & QFX devices
if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']: # for L2NG devices
mac_table = junos_views.junos_mac_address_table_switch_l2ng(self.device)
else:
mac_table = junos_views.junos_mac_address_table_switch(self.device)
else:
mac_table = junos_views.junos_mac_address_table(self.device)
mac_table.get()
mac_table_items = mac_table.items()
default_values = {
'mac': u'',
'interface': u'',
'vlan': 0,
'static': False,
'active': True,
'moves': 0,
'last_move': 0.0
}
for mac_table_entry in mac_table_items:
mac_entry = default_values.copy()
mac_entry.update(
{elem[0]: elem[1] for elem in mac_table_entry[1]}
)
mac = mac_entry.get('mac')
# JUNOS returns '*' for Type = Flood
if mac == '*':
continue
mac_entry['mac'] = napalm_base.helpers.mac(mac)
mac_address_table.append(mac_entry)
return mac_address_table | Return the MAC address table. | train | https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L1337-L1376 | null | class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
    def __init__(self, hostname, username, password, timeout=60, optional_args=None):
        """
        Initialise JunOS driver.

        Optional args:
            * config_lock (True/False): lock configuration DB after the connection is established.
            * port (int): custom port
            * key_file (string): SSH key file path
            * keepalive (int): Keepalive interval
            * ignore_warning (boolean): not generate warning exceptions
        """
        self.hostname = hostname
        self.username = username
        self.password = password
        self.timeout = timeout
        # True while a replace (vs merge) operation is in progress
        self.config_replace = False
        # True while this driver holds the configuration DB lock
        self.locked = False

        # Get optional arguments
        if optional_args is None:
            optional_args = {}

        self.config_lock = optional_args.get('config_lock', False)
        self.port = optional_args.get('port', 22)
        self.key_file = optional_args.get('key_file', None)
        self.keepalive = optional_args.get('keepalive', 30)
        self.ssh_config_file = optional_args.get('ssh_config_file', None)
        self.ignore_warning = optional_args.get('ignore_warning', False)

        # Prefer key-based authentication when a key file was supplied.
        if self.key_file:
            self.device = Device(hostname,
                                 user=username,
                                 password=password,
                                 ssh_private_key_file=self.key_file,
                                 ssh_config=self.ssh_config_file,
                                 port=self.port)
        else:
            self.device = Device(hostname,
                                 user=username,
                                 password=password,
                                 port=self.port,
                                 ssh_config=self.ssh_config_file)
        self.profile = ["junos"]
def open(self):
"""Open the connection wit the device."""
try:
self.device.open()
except ConnectTimeoutError as cte:
raise ConnectionException(cte.message)
self.device.timeout = self.timeout
self.device._conn._session.transport.set_keepalive(self.keepalive)
if hasattr(self.device, "cu"):
# make sure to remove the cu attr from previous session
# ValueError: requested attribute name cu already exists
del self.device.cu
self.device.bind(cu=Config)
if self.config_lock:
self._lock()
    def close(self):
        """Close the connection, releasing the config lock first if held."""
        if self.config_lock:
            self._unlock()
        self.device.close()
    def _lock(self):
        """Lock the config DB (no-op if this driver already holds the lock)."""
        if not self.locked:
            self.device.cu.lock()
            self.locked = True
    def _unlock(self):
        """Unlock the config DB (no-op if this driver does not hold the lock)."""
        if self.locked:
            self.device.cu.unlock()
            self.locked = False
    def _rpc(self, get, child=None, **kwargs):
        """
        This allows you to construct an arbitrary RPC call to retrieve common stuff. For example:

        Configuration:  get: "<get-configuration/>"
        Interface information:  get: "<get-interface-information/>"
        A particular interface information:
              get: "<get-interface-information/>"
              child: "<interface-name>ge-0/0/0</interface-name>"
        """
        rpc = etree.fromstring(get)  # load the RPC request from rpc string
        if child:
            # append the optional filter element under the RPC element
            rpc.append(etree.fromstring(child))
        response = self.device.execute(rpc)
        return etree.tostring(response)
    def is_alive(self):
        """Return {'is_alive': bool} reflecting the session health."""
        # evaluate the state of the underlying SSH connection
        # and also the NETCONF status from PyEZ
        return {
            'is_alive': self.device._conn._session.transport.is_active() and self.device.connected
        }
@staticmethod
def _is_json_format(config):
try:
_ = json.loads(config) # noqa
except (TypeError, ValueError):
return False
return True
    def _detect_config_format(self, config):
        """Guess the format of *config*: 'xml', 'set', 'json' or 'text'.

        Heuristics, in order: a leading '<' means XML; a first word matching
        a JUNOS set-style action means 'set'; parseable JSON means 'json';
        anything else is treated as curly-brace 'text' configuration.
        """
        fmt = 'text'
        # first tokens that identify a set-style (command) configuration
        set_action_matches = [
            'set',
            'activate',
            'deactivate',
            'annotate',
            'copy',
            'delete',
            'insert',
            'protect',
            'rename',
            'unprotect',
        ]
        if config.strip().startswith('<'):
            return 'xml'
        elif config.strip().split(' ')[0] in set_action_matches:
            return 'set'
        elif self._is_json_format(config):
            return 'json'
        return fmt
    def _load_candidate(self, filename, config, overwrite):
        """Load configuration into the candidate DB.

        The configuration comes either from *filename* (preferred when given)
        or from the *config* string. *overwrite* selects replace vs. merge
        semantics. Raises ReplaceConfigException or MergeConfigException on
        load errors, depending on the operation in progress.
        """
        if filename is None:
            configuration = config
        else:
            with open(filename) as f:
                configuration = f.read()

        if not self.config_lock:
            # if not locked during connection time
            # will try to lock it if not already acquired
            self._lock()
            # and the device will be locked till first commit/rollback

        try:
            fmt = self._detect_config_format(configuration)

            if fmt == "xml":
                # PyEZ expects an lxml element for XML payloads
                configuration = etree.XML(configuration)

            self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                                ignore_warning=self.ignore_warning)
        except ConfigLoadError as e:
            if self.config_replace:
                raise ReplaceConfigException(e.errs)
            else:
                raise MergeConfigException(e.errs)
    def load_replace_candidate(self, filename=None, config=None):
        """Load a candidate config that fully replaces the running config on commit."""
        # NOTE: previous docstring incorrectly said "merge"; this path sets
        # overwrite=True, i.e. a full replace.
        self.config_replace = True
        self._load_candidate(filename, config, True)
    def load_merge_candidate(self, filename=None, config=None):
        """Load a candidate config that is merged into the running config on commit."""
        # NOTE: previous docstring incorrectly said "replace"; this path sets
        # overwrite=False, i.e. a merge.
        self.config_replace = False
        self._load_candidate(filename, config, False)
def compare_config(self):
"""Compare candidate config with running."""
diff = self.device.cu.diff()
if diff is None:
return ''
else:
return diff.strip()
    def commit_config(self):
        """Commit the candidate configuration, releasing the lock if it was
        acquired on demand (i.e. not held for the whole session)."""
        self.device.cu.commit(ignore_warning=self.ignore_warning)
        if not self.config_lock:
            self._unlock()
    def discard_config(self):
        """Discard changes (rollback 0), releasing the lock if it was
        acquired on demand (i.e. not held for the whole session)."""
        self.device.cu.rollback(rb_id=0)
        if not self.config_lock:
            self._unlock()
    def rollback(self):
        """Rollback to previous commit (rollback 1) and commit the result."""
        self.device.cu.rollback(rb_id=1)
        self.commit_config()
def get_facts(self):
    """Collect general device facts (vendor, model, versions, interfaces)."""
    facts = self.device.facts
    # PyEZ may report uptime as None before it is gathered; use -1 then.
    uptime = self.device.uptime or -1
    iface_table = junos_views.junos_iface_table(self.device)
    iface_table.get()
    txt = py23_compat.text_type
    return {
        'vendor': u'Juniper',
        'model': txt(facts['model']),
        'serial_number': txt(facts['serialnumber']),
        'os_version': txt(facts['version']),
        'hostname': txt(facts['hostname']),
        'fqdn': txt(facts['fqdn']),
        'uptime': uptime,
        'interface_list': iface_table.keys(),
    }
def get_interfaces(self):
    """Return interfaces details.

    Each interface maps to a dict with keys: ``is_up``, ``is_enabled``,
    ``description``, ``last_flapped``, ``mac_address`` and ``speed``
    (in Mbps, -1 when it cannot be parsed).
    """
    result = {}
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    # convert all the tuples to our pre-defined dict structure
    for iface in interfaces.keys():
        result[iface] = {
            'is_up': interfaces[iface]['is_up'],
            'is_enabled': interfaces[iface]['is_enabled'],
            # None descriptions/flap times are normalised to '' / -1
            'description': (interfaces[iface]['description'] or u''),
            'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
            'mac_address': napalm_base.helpers.convert(
                napalm_base.helpers.mac,
                interfaces[iface]['mac_address'],
                py23_compat.text_type(interfaces[iface]['mac_address'])),
            'speed': -1
        }
        # result[iface]['last_flapped'] = float(result[iface]['last_flapped'])
        # Speed strings look like e.g. '1000mbps' or '10Gbps';
        # extract the numeric part and the unit suffix.
        match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
        if match is None:
            continue
        speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
        if speed_value == -1:
            continue
        speed_unit = match.group(2)
        if speed_unit.lower() == 'gbps':
            # Normalise Gbps to Mbps.
            speed_value *= 1000
        result[iface]['speed'] = speed_value
    return result
def get_interfaces_counters(self):
    """Return per-interface traffic/error counters (-1 when unavailable)."""
    counter_table = junos_views.junos_iface_counter_table(self.device)
    counter_table.get()
    result = {}
    for iface_name, counter_items in counter_table.items():
        iface_counters = {}
        for counter_name, counter_value in counter_items:
            # Missing counters are reported as None by the table view.
            iface_counters[counter_name] = (
                -1 if counter_value is None else counter_value)
        result[iface_name] = iface_counters
    return result
def get_environment(self):
    """Return environment details.

    Builds a dict with ``power``, ``fans``, ``temperature``, ``cpu``
    and ``memory`` sections from several Junos table views.
    """
    environment = junos_views.junos_enviroment_table(self.device)
    routing_engine = junos_views.junos_routing_engine_table(self.device)
    temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
    power_supplies = junos_views.junos_pem_table(self.device)
    environment.get()
    routing_engine.get()
    temperature_thresholds.get()
    environment_data = {}
    current_class = None
    for sensor_object, object_data in environment.items():
        structured_object_data = {k: v for k, v in object_data}
        if structured_object_data['class']:
            # If current object has a 'class' defined, store it for use
            # on subsequent unlabeled lines.
            current_class = structured_object_data['class']
        else:
            # Juniper doesn't label the 2nd+ lines of a given class with a
            # class name. In that case, we use the most recent class seen.
            structured_object_data['class'] = current_class
        if structured_object_data['class'] == 'Power':
            # Create a dict for the 'power' key
            try:
                environment_data['power'][sensor_object] = {}
            except KeyError:
                environment_data['power'] = {}
                environment_data['power'][sensor_object] = {}
            # Defaults; corrected from the PEM table below when available.
            environment_data['power'][sensor_object]['capacity'] = -1.0
            environment_data['power'][sensor_object]['output'] = -1.0
        if structured_object_data['class'] == 'Fans':
            # Create a dict for the 'fans' key
            try:
                environment_data['fans'][sensor_object] = {}
            except KeyError:
                environment_data['fans'] = {}
                environment_data['fans'][sensor_object] = {}
        status = structured_object_data['status']
        env_class = structured_object_data['class']
        if (status == 'OK' and env_class == 'Power'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['power'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Power'):
            environment_data['power'][sensor_object]['status'] = False
        elif (status == 'OK' and env_class == 'Fans'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['fans'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Fans'):
            environment_data['fans'][sensor_object]['status'] = False
        # NOTE(review): this iterates ALL thresholds for every sensor and
        # keys the check on the outer loop's sensor data -- O(n*m) and
        # only the last threshold row effectively decides the alarm
        # flags. Confirm this matches the intended Junos semantics.
        for temperature_object, temperature_data in temperature_thresholds.items():
            structured_temperature_data = {k: v for k, v in temperature_data}
            if structured_object_data['class'] == 'Temp':
                # Create a dict for the 'temperature' key
                try:
                    environment_data['temperature'][sensor_object] = {}
                except KeyError:
                    environment_data['temperature'] = {}
                    environment_data['temperature'][sensor_object] = {}
                # Check we have a temperature field in this class (See #66)
                if structured_object_data['temperature']:
                    environment_data['temperature'][sensor_object]['temperature'] = \
                        float(structured_object_data['temperature'])
                # Set a default value (False) to the key is_critical and is_alert
                environment_data['temperature'][sensor_object]['is_alert'] = False
                environment_data['temperature'][sensor_object]['is_critical'] = False
                # Check if the working temperature is equal to or higher than alerting threshold
                temp = structured_object_data['temperature']
                if structured_temperature_data['red-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_critical'] = True
                    environment_data['temperature'][sensor_object]['is_alert'] = True
                elif structured_temperature_data['yellow-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_alert'] = True
    # Try to correct Power Supply information
    pem_table = dict()
    try:
        power_supplies.get()
    except RpcError:
        # Not all platforms have support for this
        pass
    else:
        # Format PEM information and correct capacity and output values
        for pem in power_supplies.items():
            pem_name = pem[0].replace("PEM", "Power Supply")
            pem_table[pem_name] = dict(pem[1])
            environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
            environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
    for routing_engine_object, routing_engine_data in routing_engine.items():
        structured_routing_engine_data = {k: v for k, v in routing_engine_data}
        # Create dicts for 'cpu' and 'memory'.
        try:
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        except KeyError:
            environment_data['cpu'] = {}
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        # Calculate the CPU usage by using the CPU idle value.
        environment_data['cpu'][routing_engine_object]['%usage'] = \
            100.0 - structured_routing_engine_data['cpu-idle']
        try:
            environment_data['memory']['available_ram'] = \
                int(structured_routing_engine_data['memory-dram-size'])
        except ValueError:
            # Some platforms report e.g. '2048 MB'; keep only the digits.
            environment_data['memory']['available_ram'] = \
                int(
                    ''.join(
                        i for i in structured_routing_engine_data['memory-dram-size']
                        if i.isdigit()
                    )
                )
        # Junos gives us RAM in %, so calculation has to be made.
        # Sadly, bacause of this, results are not 100% accurate to the truth.
        environment_data['memory']['used_ram'] = \
            int(round(environment_data['memory']['available_ram'] / 100.0 *
                      structured_routing_engine_data['memory-buffer-utilization']))
    return environment_data
@staticmethod
def _get_address_family(table):
"""
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
"""
address_family_mapping = {
'inet': 'ipv4',
'inet6': 'ipv6',
'inetflow': 'flow'
}
family = table.split('.')[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = family
return address_family
def _parse_route_stats(self, neighbor):
data = {
'ipv4': {
'received_prefixes': -1,
'accepted_prefixes': -1,
'sent_prefixes': -1
},
'ipv6': {
'received_prefixes': -1,
'accepted_prefixes': -1,
'sent_prefixes': -1
}
}
if not neighbor['is_up']:
return data
elif isinstance(neighbor['tables'], list):
if isinstance(neighbor['sent_prefixes'], int):
# We expect sent_prefixes to be a list, but sometimes it
# is of type int. Therefore convert attribute to list
neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
for idx, table in enumerate(neighbor['tables']):
family = self._get_address_family(table)
data[family] = {}
data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
if 'in sync' in neighbor['send-state'][idx]:
data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
else:
data[family]['sent_prefixes'] = 0
else:
family = self._get_address_family(neighbor['tables'])
data[family] = {}
data[family]['received_prefixes'] = neighbor['received_prefixes']
data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
data[family]['sent_prefixes'] = neighbor['sent_prefixes']
return data
@staticmethod
def _parse_value(value):
    """Normalise a table value: text -> unicode, None -> empty string."""
    if value is None:
        return u''
    if isinstance(value, py23_compat.string_types):
        return py23_compat.text_type(value)
    return value
def get_bgp_neighbors(self):
    """Return BGP neighbors details, grouped by routing instance."""
    bgp_neighbor_data = {}
    default_neighbor_details = {
        'local_as': 0,
        'remote_as': 0,
        'remote_id': '',
        'is_up': False,
        'is_enabled': False,
        'description': '',
        'uptime': 0,
        'address_family': {}
    }
    keys = default_neighbor_details.keys()
    uptime_table = junos_views.junos_bgp_uptime_table(self.device)
    bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
    # cache of uptime tables, keyed by routing instance, so each
    # instance is queried at most once
    uptime_table_lookup = {}
    def _get_uptime_table(instance):
        # Memoised fetch of the per-instance uptime table.
        if instance not in uptime_table_lookup:
            uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
        return uptime_table_lookup[instance]
    def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
        '''
        Make sure to execute a simple request whenever using
        junos > 13. This is a helper used to avoid code redundancy
        and reuse the function also when iterating through the list
        BGP neighbors under a specific routing instance,
        also when the device is capable to return the routing
        instance name at the BGP neighbor level.
        '''
        for bgp_neighbor in neighbor_data:
            # strip the '+port' suffix from the peer address
            peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # not instance, means newer Junos version,
                # as we request everything in a single request
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                # instance is explicitly requests,
                # thus it's an old Junos, so we retrieve the BGP neighbors
                # under a certain routing instance
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            if instance_name not in bgp_neighbor_data:
                bgp_neighbor_data[instance_name] = {}
            if 'router_id' not in bgp_neighbor_data[instance_name]:
                # we only need to set this once
                bgp_neighbor_data[instance_name]['router_id'] = \
                    py23_compat.text_type(neighbor_details.get('local_id', ''))
            # keep only the fields defined in the NAPALM schema
            peer = {
                key: self._parse_value(value)
                for key, value in neighbor_details.items()
                if key in keys
            }
            peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
            peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
            peer['address_family'] = self._parse_route_stats(neighbor_details)
            if 'peers' not in bgp_neighbor_data[instance_name]:
                bgp_neighbor_data[instance_name]['peers'] = {}
            bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
            if not uptime_table_items:
                uptime_table_items = _get_uptime_table(instance)
            for neighbor, uptime in uptime_table_items:
                if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                    bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]
    # Commenting out the following sections, till Junos
    # will provide a way to identify the routing instance name
    # from the details of the BGP neighbor
    # currently, there are Junos 15 version having a field called `peer_fwd_rti`
    # but unfortunately, this is not consistent.
    # Junos 17 might have this fixed, but this needs to be revisited later.
    # In the definition below, `old_junos` means a version that does not provide
    # the forwarding RTI information.
    #
    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device).get()
    for instance, instance_data in instances.items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        bgp_neighbor_data[instance] = {'peers': {}}
        instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
        uptime_table_items = uptime_table.get(instance=instance).items()
        _get_bgp_neighbors_core(instance_neighbors,
                                instance=instance,
                                uptime_table_items=uptime_table_items)
    # If the OS provides the `peer_fwd_rti` or any way to identify the
    # rotuing instance name (see above), the performances of this getter
    # can be significantly improved, as we won't execute one request
    # for each an every RT.
    # However, this improvement would only be beneficial for multi-VRF envs.
    #
    # else:
    #     instance_neighbors = bgp_neighbors_table.get().items()
    #     _get_bgp_neighbors_core(instance_neighbors)
    # drop instances that ended up with no peers
    bgp_tmp_dict = {}
    for k, v in bgp_neighbor_data.items():
        if bgp_neighbor_data[k]['peers']:
            bgp_tmp_dict[k] = v
    return bgp_tmp_dict
def get_lldp_neighbors(self):
    """Return LLDP neighbors grouped by local interface."""
    lldp = junos_views.junos_lldp_table(self.device)
    try:
        lldp.get()
    except RpcError as rpcerr:
        # Log and return empty instead of raising: the RPC may fail
        # simply because LLDP is not enabled on the box. This assumes
        # the library runs in an environment able to handle logs,
        # otherwise the user just won't see this happening.
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    neighbors = {}
    for local_iface, neighbor_fields in lldp.items():
        entry = {field: py23_compat.text_type(value)
                 for field, value in neighbor_fields}
        neighbors.setdefault(local_iface, []).append(entry)
    return neighbors
def get_lldp_neighbors_detail(self, interface=''):
    """Detailed view of the LLDP neighbors.

    NOTE(review): the ``interface`` parameter is shadowed by the loop
    variable below and is never used to filter the result -- confirm
    whether per-interface filtering was intended.
    """
    lldp_neighbors = {}
    lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
    try:
        lldp_table.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    interfaces = lldp_table.get().keys()
    # get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
    # and SRX Series is get-lldp-interface-neighbors-information,
    # and rpc for M, MX, and T Series is get-lldp-interface-neighbors
    # ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
    # ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
    lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
    if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
        lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
    for interface in interfaces:
        # The RPC argument name differs between platform families.
        if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
            lldp_table.get(interface_name=interface)
        else:
            lldp_table.get(interface_device=interface)
        for item in lldp_table:
            if interface not in lldp_neighbors.keys():
                lldp_neighbors[interface] = []
            lldp_neighbors[interface].append({
                'parent_interface': item.parent_interface,
                'remote_port': item.remote_port,
                'remote_chassis_id': napalm_base.helpers.convert(
                    napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
                'remote_port_description': napalm_base.helpers.convert(
                    py23_compat.text_type, item.remote_port_description),
                'remote_system_name': item.remote_system_name,
                'remote_system_description': item.remote_system_description,
                'remote_system_capab': item.remote_system_capab,
                'remote_system_enable_capab': item.remote_system_enable_capab
            })
    return lldp_neighbors
def cli(self, commands):
    """Execute raw CLI commands and returns their output.

    :param commands: list of CLI command strings; pipe modifiers
        (``| match``, ``| count`` ...) are emulated locally via the
        helpers below when needed.
    :returns: dict mapping each original command to its output.
    :raises TypeError: when *commands* is not a list.
    """
    cli_output = {}
    def _count(txt, none):  # Second arg for consistency only. noqa
        '''
        Return the exact output, as Junos displays
        e.g.:
        > show system processes extensive | match root | count
        Count: 113 lines
        '''
        count = len(txt.splitlines())
        return 'Count: {count} lines'.format(count=count)
    def _trim(txt, length):
        '''
        Trim specified number of columns from start of line.
        '''
        try:
            newlines = []
            for line in txt.splitlines():
                newlines.append(line[int(length):])
            return '\n'.join(newlines)
        except ValueError:
            # non-numeric length: return the text untouched
            return txt
    def _except(txt, pattern):
        '''
        Show only text that does not match a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        unmatched = [
            line for line in txt.splitlines()
            if not re.search(rgx, line, re.I)
        ]
        return '\n'.join(unmatched)
    def _last(txt, length):
        '''
        Display end of output only.
        '''
        try:
            return '\n'.join(
                txt.splitlines()[(-1)*int(length):]
            )
        except ValueError:
            # non-numeric length: return the text untouched
            return txt
    def _match(txt, pattern):
        '''
        Show only text that matches a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        matched = [
            line for line in txt.splitlines()
            if re.search(rgx, line, re.I)
        ]
        return '\n'.join(matched)
    def _find(txt, pattern):
        '''
        Search for first occurrence of pattern.
        '''
        rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
        match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
        if match:
            return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
        else:
            return '\nPattern not found'
    def _process_pipe(cmd, txt):
        '''
        Process CLI output from Juniper device that
        doesn't allow piping the output.
        '''
        # NOTE(review): returning early when txt is not None means the
        # pipe emulation below only runs for a None payload -- confirm
        # whether the condition should be ``txt is None`` instead.
        if txt is not None:
            return txt
        _OF_MAP = OrderedDict()
        _OF_MAP['except'] = _except
        _OF_MAP['match'] = _match
        _OF_MAP['last'] = _last
        _OF_MAP['trim'] = _trim
        _OF_MAP['count'] = _count
        _OF_MAP['find'] = _find
        # the operations order matter in this case!
        exploded_cmd = cmd.split('|')
        pipe_oper_args = {}
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            pipe_args = ''.join(exploded_pipe[1:2])
            # will not throw error when there's no arg
            pipe_oper_args[pipe_oper] = pipe_args
        for oper in _OF_MAP.keys():
            # to make sure the operation sequence is correct
            if oper not in pipe_oper_args.keys():
                continue
            txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
        return txt
    if not isinstance(commands, list):
        raise TypeError('Please enter a valid list of commands!')
    _PIPE_BLACKLIST = ['save']
    # Preprocessing to avoid forbidden commands
    for command in commands:
        exploded_cmd = command.split('|')
        command_safe_parts = []
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            if pipe_oper in _PIPE_BLACKLIST:
                continue
            pipe_args = ''.join(exploded_pipe[1:2])
            safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                              args=pipe_args)
            command_safe_parts.append(safe_pipe)
        safe_command = exploded_cmd[0] if not command_safe_parts else\
            '{base} | {pipes}'.format(base=exploded_cmd[0],
                                      pipes=' | '.join(command_safe_parts))
        raw_txt = self.device.cli(safe_command, warning=False)
        cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
            _process_pipe(command, raw_txt))
    return cli_output
def get_bgp_config(self, group='', neighbor=''):
    """Return BGP configuration.

    :param group: restrict the output to a single BGP group.
    :param neighbor: restrict the output to one neighbor of *group*
        (ignored when no group is given).
    """
    def update_dict(d, u):  # for deep dictionary update
        # NOTE(review): collections.Mapping is removed in Python 3.10;
        # collections.abc.Mapping is the forward-compatible spelling.
        for k, v in u.items():
            if isinstance(d, collections.Mapping):
                if isinstance(v, collections.Mapping):
                    r = update_dict(d.get(k, {}), v)
                    d[k] = r
                else:
                    d[k] = u[k]
            else:
                d = {k: u[k]}
        return d
    def build_prefix_limit(**args):
        """
        Transform the lements of a dictionary into nested dictionaries.

        Example:
            {
                'inet_unicast_limit': 500,
                'inet_unicast_teardown_threshold': 95,
                'inet_unicast_teardown_timeout': 5
            }

        becomes:
            {
                'inet': {
                    'unicast': {
                        'limit': 500,
                        'teardown': {
                            'threshold': 95,
                            'timeout': 5
                        }
                    }
                }
            }
        """
        prefix_limit = {}
        for key, value in args.items():
            key_levels = key.split('_')
            length = len(key_levels)-1
            temp_dict = {
                key_levels[length]: value
            }
            # wrap the value level by level, innermost first
            for index in reversed(range(length)):
                level = key_levels[index]
                temp_dict = {level: temp_dict}
            update_dict(prefix_limit, temp_dict)
        return prefix_limit
    # field -> type maps; fields absent from the device output are
    # filled with the type's default via _DATATYPE_DEFAULT_.
    _COMMON_FIELDS_DATATYPE_ = {
        'description': py23_compat.text_type,
        'local_address': py23_compat.text_type,
        'local_as': int,
        'remote_as': int,
        'import_policy': py23_compat.text_type,
        'export_policy': py23_compat.text_type,
        'inet_unicast_limit_prefix_limit': int,
        'inet_unicast_teardown_threshold_prefix_limit': int,
        'inet_unicast_teardown_timeout_prefix_limit': int,
        'inet_unicast_novalidate_prefix_limit': int,
        'inet_flow_limit_prefix_limit': int,
        'inet_flow_teardown_threshold_prefix_limit': int,
        'inet_flow_teardown_timeout_prefix_limit': int,
        'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
        'inet6_unicast_limit_prefix_limit': int,
        'inet6_unicast_teardown_threshold_prefix_limit': int,
        'inet6_unicast_teardown_timeout_prefix_limit': int,
        'inet6_unicast_novalidate_prefix_limit': int,
        'inet6_flow_limit_prefix_limit': int,
        'inet6_flow_teardown_threshold_prefix_limit': int,
        'inet6_flow_teardown_timeout_prefix_limit': int,
        'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
    }
    _PEER_FIELDS_DATATYPE_MAP_ = {
        'authentication_key': py23_compat.text_type,
        'route_reflector_client': bool,
        'nhs': bool
    }
    _PEER_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )
    _GROUP_FIELDS_DATATYPE_MAP_ = {
        'type': py23_compat.text_type,
        'apply_groups': list,
        'remove_private_as': bool,
        'multipath': bool,
        'multihop_ttl': int
    }
    _GROUP_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )
    _DATATYPE_DEFAULT_ = {
        py23_compat.text_type: '',
        int: 0,
        bool: False,
        list: []
    }
    bgp_config = {}
    if group:
        bgp = junos_views.junos_bgp_config_group_table(self.device)
        bgp.get(group=group)
    else:
        bgp = junos_views.junos_bgp_config_table(self.device)
        bgp.get()
        neighbor = ''  # if no group is set, no neighbor should be set either
    bgp_items = bgp.items()
    if neighbor:
        neighbor_ip = napalm_base.helpers.ip(neighbor)
    for bgp_group in bgp_items:
        bgp_group_name = bgp_group[0]
        bgp_group_details = bgp_group[1]
        # seed the group with type defaults for the non-prefix-limit fields
        bgp_config[bgp_group_name] = {
            field: _DATATYPE_DEFAULT_.get(datatype)
            for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
            if '_prefix_limit' not in field
        }
        for elem in bgp_group_details:
            # skip prefix-limit fields (handled below) and null values
            if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                continue
            datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
            default = _DATATYPE_DEFAULT_.get(datatype)
            key = elem[0]
            value = elem[1]
            if key in ['export_policy', 'import_policy']:
                # Junos may return a policy chain as a list
                if isinstance(value, list):
                    value = ' '.join(value)
            if key == 'local_address':
                value = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, value, value)
            if key == 'neighbors':
                # keep the neighbors table aside; it is expanded below
                bgp_group_peers = value
                continue
            bgp_config[bgp_group_name].update({
                key: napalm_base.helpers.convert(datatype, value, default)
            })
        prefix_limit_fields = {}
        for elem in bgp_group_details:
            if '_prefix_limit' in elem[0] and elem[1] is not None:
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                prefix_limit_fields.update({
                    elem[0].replace('_prefix_limit', ''):
                        napalm_base.helpers.convert(datatype, elem[1], default)
                })
        bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
        if 'multihop' in bgp_config[bgp_group_name].keys():
            # Delete 'multihop' key from the output
            del bgp_config[bgp_group_name]['multihop']
            if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                # Set ttl to default value 64
                bgp_config[bgp_group_name]['multihop_ttl'] = 64
        bgp_config[bgp_group_name]['neighbors'] = {}
        for bgp_group_neighbor in bgp_group_peers.items():
            bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
            if neighbor and bgp_peer_address != neighbor:
                continue  # if filters applied, jump over all other neighbors
            bgp_group_details = bgp_group_neighbor[1]
            # seed the peer with type defaults for non-prefix-limit fields
            bgp_peer_details = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                bgp_peer_details.update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
                bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['local_as'])
                bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['remote_as'])
                if key == 'cluster':
                    bgp_peer_details['route_reflector_client'] = True
                    # we do not want cluster in the output
                    del bgp_peer_details['cluster']
            if 'cluster' in bgp_config[bgp_group_name].keys():
                # group-level cluster id implies the peer is a RR client
                bgp_peer_details['route_reflector_client'] = True
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
            if neighbor and bgp_peer_address == neighbor_ip:
                break  # found the desired neighbor
        if 'cluster' in bgp_config[bgp_group_name].keys():
            # we do not want cluster in the output
            del bgp_config[bgp_group_name]['cluster']
    return bgp_config
def get_bgp_neighbors_detail(self, neighbor_address=''):
    """Detailed view of the BGP neighbors operational data.

    :param neighbor_address: optional filter, passed down to the
        per-instance table request.
    :returns: dict keyed by routing instance, then remote AS, each
        holding a list of neighbor detail dicts.
    """
    bgp_neighbors = {}
    default_neighbor_details = {
        'up': False,
        'local_as': 0,
        'remote_as': 0,
        'router_id': u'',
        'local_address': u'',
        'routing_table': u'',
        'local_address_configured': False,
        'local_port': 0,
        'remote_address': u'',
        'remote_port': 0,
        'multihop': False,
        'multipath': False,
        'remove_private_as': False,
        'import_policy': u'',
        'export_policy': u'',
        'input_messages': -1,
        'output_messages': -1,
        'input_updates': -1,
        'output_updates': -1,
        'messages_queued_out': -1,
        'connection_state': u'',
        'previous_connection_state': u'',
        'last_event': u'',
        'suppress_4byte_as': False,
        'local_as_prepend': False,
        'holdtime': 0,
        'configured_holdtime': 0,
        'keepalive': 0,
        'configured_keepalive': 0,
        'active_prefix_count': -1,
        'received_prefix_count': -1,
        'accepted_prefix_count': -1,
        'suppressed_prefix_count': -1,
        'advertised_prefix_count': -1,
        'flap_count': 0
    }
    # Junos 'Options:' tokens mapped to NAPALM boolean flags.
    OPTION_KEY_MAP = {
        'RemovePrivateAS': 'remove_private_as',
        'Multipath': 'multipath',
        'Multihop': 'multihop',
        'AddressFamily': 'local_address_configured'
        # 'AuthKey' : 'authentication_key_set'
        # but other vendors do not specify if auth key is set
        # other options:
        # Preference, HoldTime, Ttl, LogUpDown, Refresh
    }
    def _bgp_iter_core(neighbor_data, instance=None):
        '''
        Iterate over a list of neighbors.
        For older junos, the routing instance is not specified inside the
        BGP neighbors XML, therefore we need to use a super sub-optimal structure
        as in get_bgp_neighbors: iterate through the list of network instances
        then execute one request for each and every routing instance.
        For newer junos, this is not necessary as the routing instance is available
        and we can get everything solve in a single request.
        '''
        for bgp_neighbor in neighbor_data:
            remote_as = int(bgp_neighbor[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # newer Junos exposes the forwarding RTI per neighbor
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            # translate the space-separated options string to booleans
            options = neighbor_details.pop('options', '')
            if isinstance(options, str):
                options_list = options.split()
                for option in options_list:
                    key = OPTION_KEY_MAP.get(option)
                    if key is not None:
                        neighbor_details[key] = True
            four_byte_as = neighbor_details.pop('4byte_as', 0)
            # addresses come as 'ip+port'; split and default port to 179
            local_address = neighbor_details.pop('local_address', '')
            local_details = local_address.split('+')
            neighbor_details['local_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, local_details[0], local_details[0])
            if len(local_details) == 2:
                neighbor_details['local_port'] = int(local_details[1])
            else:
                neighbor_details['local_port'] = 179
            neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
            peer_address = neighbor_details.pop('peer_address', '')
            remote_details = peer_address.split('+')
            neighbor_details['remote_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, remote_details[0], remote_details[0])
            if len(remote_details) == 2:
                neighbor_details['remote_port'] = int(remote_details[1])
            else:
                neighbor_details['remote_port'] = 179
            neighbor_details['routing_table'] = instance_name
            neighbor_details['local_as'] = napalm_base.helpers.as_number(
                neighbor_details['local_as'])
            neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                neighbor_details['remote_as'])
            neighbors_rib = neighbor_details.pop('rib')
            neighbors_queue = neighbor_details.pop('queue')
            # total queued messages across all queues
            messages_queued_out = 0
            for queue_entry in neighbors_queue.items():
                messages_queued_out += queue_entry[1][0][1]
            neighbor_details['messages_queued_out'] = messages_queued_out
            if instance_name not in bgp_neighbors.keys():
                bgp_neighbors[instance_name] = {}
            if remote_as not in bgp_neighbors[instance_name].keys():
                bgp_neighbors[instance_name][remote_as] = []
            neighbor_rib_stats = neighbors_rib.items()
            if not neighbor_rib_stats:
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                continue  # no RIBs available, pass default details
            # sum the prefix counters over all RIBs of this neighbor
            neighbor_rib_details = {
                'active_prefix_count': 0,
                'received_prefix_count': 0,
                'accepted_prefix_count': 0,
                'suppressed_prefix_count': 0,
                'advertised_prefix_count': 0
            }
            for rib_entry in neighbor_rib_stats:
                for elem in rib_entry[1]:
                    if elem[1] is None:
                        neighbor_rib_details[elem[0]] += 0
                    else:
                        neighbor_rib_details[elem[0]] += elem[1]
            neighbor_details.update(neighbor_rib_details)
            bgp_neighbors[instance_name][remote_as].append(neighbor_details)
    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device)
    for instance, instance_data in instances.get().items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                neighbor_address=str(neighbor_address)).items()
        _bgp_iter_core(neighbor_data, instance=instance)
    # else:
    #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
    #     _bgp_iter_core(neighbor_data)
    return bgp_neighbors
def get_arp_table(self):
    """Return the ARP table as a list of entry dicts."""
    # The stock jnpr.junos.op.phyport.ArpTable is deliberately not used:
    # the custom view adds filters, VLAN-ID grouping and the
    # hostname/TTE fields that NAPALM needs.
    table = junos_views.junos_arp_table(self.device)
    table.get()
    entries = []
    for _, fields in table.items():
        entry = dict(fields)
        entry['mac'] = napalm_base.helpers.mac(entry.get('mac'))
        entry['ip'] = napalm_base.helpers.ip(entry.get('ip'))
        entries.append(entry)
    return entries
def get_ntp_peers(self):
    """Return the NTP peers configured on the device, keyed by IP."""
    peers_table = junos_views.junos_ntp_peers_config_table(self.device)
    peers_table.get()
    configured_peers = peers_table.items()
    if not configured_peers:
        return {}
    result = {}
    for peer in configured_peers:
        result[napalm_base.helpers.ip(peer[0])] = {}
    return result
def get_ntp_servers(self):
    """Return the NTP servers configured on the device, keyed by IP."""
    servers_table = junos_views.junos_ntp_servers_config_table(self.device)
    servers_table.get()
    configured_servers = servers_table.items()
    if not configured_servers:
        return {}
    result = {}
    for server in configured_servers:
        result[napalm_base.helpers.ip(server[0])] = {}
    return result
def get_ntp_stats(self):
    """Return NTP stats (associations).

    ``show ntp associations`` has no XML RPC, so the raw CLI output is
    retrieved and parsed line by line with a regular expression.
    """
    ntp_stats = []
    # Raw string literals: the previous non-raw literals relied on
    # invalid escape sequences (e.g. '\s'), which raise SyntaxWarning /
    # DeprecationWarning on modern Python.
    REGEX = (
        r'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
        r'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
        r'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
        r'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
        r'\s+([0-9\.]+)\s?$'
    )
    ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
    ntp_assoc_output_lines = ntp_assoc_output.splitlines()
    for ntp_assoc_output_line in ntp_assoc_output_lines[3:]:  # skip the 3 header lines
        line_search = re.search(REGEX, ntp_assoc_output_line, re.I)
        if not line_search:
            continue  # pattern not found on this line
        line_groups = line_search.groups()
        try:
            ntp_stats.append({
                'remote': napalm_base.helpers.ip(line_groups[1]),
                'synchronized': (line_groups[0] == '*'),
                'referenceid': py23_compat.text_type(line_groups[2]),
                'stratum': int(line_groups[3]),
                'type': py23_compat.text_type(line_groups[4]),
                'when': py23_compat.text_type(line_groups[5]),
                'hostpoll': int(line_groups[6]),
                'reachability': int(line_groups[7]),
                'delay': float(line_groups[8]),
                'offset': float(line_groups[9]),
                'jitter': float(line_groups[10])
            })
        except Exception:
            # malformed association line: best-effort, skip it
            continue
    return ntp_stats
def get_interfaces_ip(self):
    """Return the configured IP addresses.

    Structure: ``{interface: {family: {address: {'prefix_length': int}}}}``
    with family one of ``'ipv4'`` / ``'ipv6'``.
    """
    interfaces_ip = {}
    interface_table = junos_views.junos_ip_interfaces_table(self.device)
    interface_table.get()
    interface_table_items = interface_table.items()
    _FAMILY_VMAP_ = {
        'inet': u'ipv4',
        'inet6': u'ipv6'
        # can add more mappings
    }
    # fallback prefix length when the mask is absent from the table key
    _FAMILY_MAX_PREFIXLEN = {
        'inet': 32,
        'inet6': 128
    }
    for interface_details in interface_table_items:
        # table key is "<address>/<prefixlen>"
        ip_network = interface_details[0]
        ip_address = ip_network.split('/')[0]
        address = napalm_base.helpers.convert(
            napalm_base.helpers.ip, ip_address, ip_address)
        try:
            interface_details_dict = dict(interface_details[1])
            family_raw = interface_details_dict.get('family')
            interface = py23_compat.text_type(interface_details_dict.get('interface'))
        except ValueError:
            continue
        prefix = napalm_base.helpers.convert(int,
                                             ip_network.split('/')[-1],
                                             _FAMILY_MAX_PREFIXLEN.get(family_raw))
        family = _FAMILY_VMAP_.get(family_raw)
        if not family or not interface:
            # unsupported family (e.g. iso, mpls) or missing interface name
            continue
        if interface not in interfaces_ip.keys():
            interfaces_ip[interface] = {}
        if family not in interfaces_ip[interface].keys():
            interfaces_ip[interface][family] = {}
        if address not in interfaces_ip[interface][family].keys():
            interfaces_ip[interface][family][address] = {}
        interfaces_ip[interface][family][address][u'prefix_length'] = prefix
    return interfaces_ip
def get_route_to(self, destination='', protocol=''):
"""Return route details to a specific destination, learned from a certain protocol."""
routes = {}
if not isinstance(destination, py23_compat.string_types):
raise TypeError('Please specify a valid destination!')
if protocol and isinstance(destination, py23_compat.string_types):
protocol = protocol.lower()
if protocol == 'connected':
protocol = 'direct' # this is how is called on JunOS
_COMMON_PROTOCOL_FIELDS_ = [
'destination',
'prefix_length',
'protocol',
'current_active',
'last_active',
'age',
'next_hop',
'outgoing_interface',
'selected_next_hop',
'preference',
'inactive_reason',
'routing_table'
] # identifies the list of fileds common for all protocols
_BOOLEAN_FIELDS_ = [
'current_active',
'selected_next_hop',
'last_active'
] # fields expected to have boolean values
_PROTOCOL_SPECIFIC_FIELDS_ = {
'bgp': [
'local_as',
'remote_as',
'as_path',
'communities',
'local_preference',
'preference2',
'remote_address',
'metric',
'metric2'
],
'isis': [
'level',
'metric',
'local_as'
]
}
routes_table = junos_views.junos_protocol_route_table(self.device)
rt_kargs = {
'destination': destination
}
if protocol and isinstance(destination, py23_compat.string_types):
rt_kargs['protocol'] = protocol
try:
routes_table.get(**rt_kargs)
except RpcTimeoutError:
# on devices with milions of routes
# in case the destination is too generic (e.g.: 10/8)
# will take very very long to determine all routes and
# moreover will return a huge list
raise CommandTimeoutException(
'Too many routes returned! Please try with a longer prefix or a specific protocol!'
)
except RpcError as rpce:
if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
raise CommandErrorException(
'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
raise CommandErrorException(rpce)
except Exception as err:
raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))
routes_items = routes_table.items()
for route in routes_items:
d = {}
# next_hop = route[0]
d = {elem[0]: elem[1] for elem in route[1]}
destination = napalm_base.helpers.ip(d.pop('destination', ''))
prefix_length = d.pop('prefix_length', 32)
destination = '{d}/{p}'.format(
d=destination,
p=prefix_length
)
d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
as_path = d.get('as_path')
if as_path is not None:
d['as_path'] = as_path.split(' I ')[0]\
.replace('AS path:', '')\
.replace('I', '')\
.strip()
# to be sure that contains only AS Numbers
if d.get('inactive_reason') is None:
d['inactive_reason'] = u''
route_protocol = d.get('protocol').lower()
if protocol and protocol != route_protocol:
continue
communities = d.get('communities')
if communities is not None and type(communities) is not list:
d['communities'] = [communities]
d_keys = list(d.keys())
# fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
all_protocol_attributes = {
key: d.pop(key)
for key in d_keys
if key not in _COMMON_PROTOCOL_FIELDS_
}
protocol_attributes = {
key: value for key, value in all_protocol_attributes.items()
if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
}
d['protocol_attributes'] = protocol_attributes
if destination not in routes.keys():
routes[destination] = []
routes[destination].append(d)
return routes
def get_snmp_information(self):
    """Return the SNMP configuration, including per-community details."""
    snmp_config = junos_views.junos_snmp_config_table(self.device)
    snmp_config.get()
    snmp_items = snmp_config.items()
    if not snmp_items:
        return {}
    # the first (only) entry carries the chassis-wide SNMP fields
    snmp_information = {}
    for field, value in snmp_items[0][1]:
        snmp_information[py23_compat.text_type(field)] = value if value else ''
    snmp_information['community'] = {}
    communities_table = snmp_information.pop('communities_table')
    if not communities_table:
        return snmp_information
    for community in communities_table.items():
        community_name = py23_compat.text_type(community[0])
        community_details = {'acl': ''}
        for field, value in community[1]:
            if field == 'mode':
                # translate Junos authorization mode to the NAPALM vocabulary
                value = C.SNMP_AUTHORIZATION_MODE_MAP.get(value)
            community_details[py23_compat.text_type(field)] = py23_compat.text_type(value)
        snmp_information['community'][community_name] = community_details
    return snmp_information
def get_probes_config(self):
    """Return the configured RPM probes, grouped by probe then test name."""
    probes = {}
    probes_table = junos_views.junos_rpm_probes_config_table(self.device)
    probes_table.get()
    for probe_test in probes_table.items():
        test_name = py23_compat.text_type(probe_test[0])
        test_details = dict(probe_test[1])
        probe_name = napalm_base.helpers.convert(
            py23_compat.text_type, test_details.pop('probe_name'))
        test_config = {
            'probe_type': napalm_base.helpers.convert(
                py23_compat.text_type, test_details.pop('probe_type', '')),
            'target': napalm_base.helpers.convert(
                py23_compat.text_type, test_details.pop('target', '')),
            'source': napalm_base.helpers.convert(
                py23_compat.text_type, test_details.pop('source_address', '')),
            'probe_count': napalm_base.helpers.convert(
                int, test_details.pop('probe_count', '0')),
            'test_interval': napalm_base.helpers.convert(
                int, test_details.pop('test_interval', '0')),
        }
        probes.setdefault(probe_name, {})[test_name] = test_config
    return probes
def get_probes_results(self):
    """Return the latest results of the RPM probes, grouped by probe and test."""
    probes_results = {}
    results_table = junos_views.junos_rpm_probes_results_table(self.device)
    results_table.get()
    for probe_entry in results_table.items():
        probe_name = py23_compat.text_type(probe_entry[0])
        test_results = dict(probe_entry[1])
        test_results['last_test_loss'] = napalm_base.helpers.convert(
            int, test_results.pop('last_test_loss'), 0)
        for field, value in test_results.items():
            if isinstance(value, float):
                # device reports timings in microseconds -> milliseconds
                test_results[field] = value * 1e-3
        test_name = test_results.pop('test_name', '')
        if test_results.get('source', u'') is None:
            test_results['source'] = u''
        probes_results.setdefault(probe_name, {})[test_name] = test_results
    return probes_results
def traceroute(self,
               destination,
               source=C.TRACEROUTE_SOURCE,
               ttl=C.TRACEROUTE_TTL,
               timeout=C.TRACEROUTE_TIMEOUT,
               vrf=C.TRACEROUTE_VRF):
    """Execute traceroute and return results.

    Returns ``{'success': {ttl: {'probes': {...}}}}`` on success, or
    ``{'error': message}`` when the device reports a failure.
    """
    traceroute_result = {}
    # calling from RPC does not work properly :(
    # but defined junos_route_instance_table just in case
    source_str = ''
    maxttl_str = ''
    wait_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        wait_str = ' wait {timeout}'.format(timeout=timeout)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
        destination=destination,
        source=source_str,
        maxttl=maxttl_str,
        wait=wait_str,
        vrf=vrf_str
    )
    traceroute_rpc = E('command', traceroute_command)
    rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    traceroute_results = rpc_reply.find('.//traceroute-results')
    traceroute_failure = napalm_base.helpers.find_txt(
        traceroute_results, 'traceroute-failure', '')
    error_message = napalm_base.helpers.find_txt(
        traceroute_results, 'rpc-error/error-message', '')
    if traceroute_failure and error_message:
        return {'error': '{}: {}'.format(traceroute_failure, error_message)}
    traceroute_result['success'] = {}
    for hop in traceroute_results.findall('hop'):
        ttl_value = napalm_base.helpers.convert(
            int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
        if ttl_value not in traceroute_result['success']:
            traceroute_result['success'][ttl_value] = {'probes': {}}
        for probe in hop.findall('probe-result'):
            probe_index = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
            host_name = py23_compat.text_type(
                napalm_base.helpers.find_txt(probe, 'host-name', '*'))
            # device reports rtt in microseconds; convert to milliseconds
            rtt = napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3  # ms
            traceroute_result['success'][ttl_value]['probes'][probe_index] = {
                'ip_address': ip_address,
                'host_name': host_name,
                'rtt': rtt
            }
    return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
         timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
    """Execute a ping towards *destination* and return the parsed results.

    Returns ``{'success': {...}}`` with rtt statistics and per-probe
    results, or ``{'error': message}`` on RPC failure / 100% packet loss.
    """
    ping_dict = {}
    source_str = ''
    maxttl_str = ''
    timeout_str = ''
    size_str = ''
    count_str = ''
    vrf_str = ''
    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        timeout_str = ' wait {timeout}'.format(timeout=timeout)
    if size:
        size_str = ' size {size}'.format(size=size)
    if count:
        count_str = ' count {count}'.format(count=count)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
        destination=destination,
        source=source_str,
        ttl=maxttl_str,
        timeout=timeout_str,
        size=size_str,
        count=count_str,
        vrf=vrf_str
    )
    ping_rpc = E('command', ping_command)
    rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    probe_summary = rpc_reply.find('.//probe-results-summary')
    if probe_summary is None:
        rpc_error = rpc_reply.find('.//rpc-error')
        return {'error': '{}'.format(
            napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
    packet_loss = napalm_base.helpers.convert(
        int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
    # rtt values are valid only if we get an ICMP reply.
    # NOTE: was `packet_loss is not 100`, an identity comparison of ints
    # that only works by accident of CPython's small-int caching.
    if packet_loss != 100:
        ping_dict['success'] = {}
        ping_dict['success']['probes_sent'] = int(
            probe_summary.findtext("probes-sent"))
        ping_dict['success']['packet_loss'] = packet_loss
        # all rtt values come back in microseconds; convert to ms
        ping_dict['success'].update({
            'rtt_min': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
            'rtt_max': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
            'rtt_avg': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-average'), -1) * 1e-3), 3),
            'rtt_stddev': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
        })
        tmp = rpc_reply.find('.//ping-results')
        results_array = []
        for probe_result in tmp.findall('probe-result'):
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip,
                napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
            rtt = round(
                (napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(
                        probe_result, 'rtt'), -1) * 1e-3), 3)
            results_array.append({'ip_address': ip_address,
                                  'rtt': rtt})
        ping_dict['success'].update({'results': results_array})
    else:
        return {'error': 'Packet loss {}'.format(packet_loss)}
    return ping_dict
def get_users(self):
    """Return the locally configured users, with privilege level and SSH keys."""
    # map Junos login classes to Cisco-style privilege levels
    _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = {
        'super-user': 15,
        'superuser': 15,
        'operator': 5,
        'read-only': 1,
        'unauthorized': 0
    }
    _DEFAULT_USER_DETAILS = {
        'level': 0,
        'password': '',
        'sshkeys': []
    }
    users = {}
    users_table = junos_views.junos_users_table(self.device)
    users_table.get()
    for username, raw_fields in users_table.items():
        details = _DEFAULT_USER_DETAILS.copy()
        # keep only fields that actually have a value
        for field, value in raw_fields:
            if value:
                details[field] = value
        user_class = details.pop('class', '')
        details = {
            field: py23_compat.text_type(details[field])
            for field in details.keys()
        }
        details['level'] = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0)
        # collect whichever key types are present into a single list
        details['sshkeys'] = [
            details.pop(key_type)
            for key_type in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
            if details.get(key_type, '')
        ]
        users[username] = details
    return users
def get_optics(self):
    """Return optics information (per-interface, per-lane power levels)."""
    optics_table = junos_views.junos_intf_optics_table(self.device)
    optics_table.get()
    optics_items = optics_table.items()
    # optics_items has no lane information, so we need to re-format data
    # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
    # but the information for 40G/100G is incorrect at this point
    # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
    optics_items_with_lane = []
    for intf_optic_item in optics_items:
        temp_list = list(intf_optic_item)
        temp_list.insert(1, u"0")
        new_intf_optic_item = tuple(temp_list)
        optics_items_with_lane.append(new_intf_optic_item)
    # Now optics_items_with_lane has all optics with lane 0 included
    # Example: ('xe-0/0/0', u'0', [ optical_values ])
    # Get optical information for 40G/100G optics
    optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
    optics_table40G.get()
    optics_40Gitems = optics_table40G.items()
    # Re-format data as before inserting lane value
    new_optics_40Gitems = []
    for item in optics_40Gitems:
        lane = item[0]
        iface = item[1].pop(0)
        new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
    # New_optics_40Gitems contains 40G/100G optics only:
    # ('et-0/0/49', u'0', [ optical_values ]),
    # ('et-0/0/49', u'1', [ optical_values ]),
    # ('et-0/0/49', u'2', [ optical_values ])
    # Remove 40G/100G optics entries with wrong information returned
    # from junos_intf_optics_table()
    iface_40G = [item[0] for item in new_optics_40Gitems]
    for intf_optic_item in optics_items_with_lane:
        iface_name = intf_optic_item[0]
        if iface_name not in iface_40G:
            new_optics_40Gitems.append(intf_optic_item)
    # New_optics_40Gitems contains all optics 10G/40G/100G with the lane
    optics_detail = {}
    for intf_optic_item in new_optics_40Gitems:
        lane = intf_optic_item[1]
        interface_name = py23_compat.text_type(intf_optic_item[0])
        optics = dict(intf_optic_item[2])
        if interface_name not in optics_detail:
            optics_detail[interface_name] = {}
            optics_detail[interface_name]['physical_channels'] = {}
            optics_detail[interface_name]['physical_channels']['channel'] = []
        # Defaulting avg, min, max values to 0.0 since device does not
        # return these values
        intf_optics = {
            'index': int(lane),
            'state': {
                'input_power': {
                    'instant': (
                        float(optics['input_power'])
                        if optics['input_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'output_power': {
                    'instant': (
                        float(optics['output_power'])
                        if optics['output_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'laser_bias_current': {
                    'instant': (
                        float(optics['laser_bias_current'])
                        if optics['laser_bias_current'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                }
            }
        }
        optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
    return optics_detail
def get_config(self, retrieve='all'):
    """Return the device configuration as text.

    :param retrieve: 'running', 'candidate' or 'all'.
    Junos has no startup configuration, so 'startup' is always empty.
    """
    def _fetch(database):
        # fetch one configuration database in text format
        opts = {'format': 'text', 'database': database}
        reply = self.device.rpc.get_config(filter_xml=None, options=opts)
        return py23_compat.text_type(reply.text)

    rv = {'startup': '', 'running': '', 'candidate': ''}
    if retrieve in ('candidate', 'all'):
        rv['candidate'] = _fetch('candidate')
    if retrieve in ('running', 'all'):
        rv['running'] = _fetch('committed')
    return rv
def get_network_instances(self, name=''):
    """Return the routing instances (VRFs), optionally filtered by *name*.

    Interfaces not bound to any instance are reported under 'default'.
    """
    network_instances = {}
    ri_table = junos_views.junos_nw_instances_table(self.device)
    ri_table.get()
    ri_entries = ri_table.items()
    vrf_interfaces = []
    for ri_entry in ri_entries:
        ri_name = py23_compat.text_type(ri_entry[0])
        ri_details = {
            d[0]: d[1] for d in ri_entry[1]
        }
        ri_type = ri_details['instance_type']
        if ri_type is None:
            ri_type = 'default'
        ri_rd = ri_details['route_distinguisher']
        ri_interfaces = ri_details['interfaces']
        # single-interface instances come back as a scalar, not a list
        if not isinstance(ri_interfaces, list):
            ri_interfaces = [ri_interfaces]
        network_instances[ri_name] = {
            'name': ri_name,
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
            'state': {
                'route_distinguisher': ri_rd if ri_rd else ''
            },
            'interfaces': {
                'interface': {
                    intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                }
            }
        }
        vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
    # interfaces that belong to no explicit instance go into 'default'
    all_interfaces = self.get_interfaces().keys()
    default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
    if 'default' not in network_instances:
        network_instances['default'] = {
            'name': 'default',
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
            'state': {
                'route_distinguisher': ''
            },
            'interfaces': {
                'interface': {
                    py23_compat.text_type(intrf_name): {}
                    for intrf_name in default_interfaces
                }
            }
        }
    if not name:
        return network_instances
    if name not in network_instances:
        return {}
    return {name: network_instances[name]}
|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver.get_probes_config | python | def get_probes_config(self):
probes = {}
probes_table = junos_views.junos_rpm_probes_config_table(self.device)
probes_table.get()
probes_table_items = probes_table.items()
for probe_test in probes_table_items:
test_name = py23_compat.text_type(probe_test[0])
test_details = {
p[0]: p[1] for p in probe_test[1]
}
probe_name = napalm_base.helpers.convert(
py23_compat.text_type, test_details.pop('probe_name'))
target = napalm_base.helpers.convert(
py23_compat.text_type, test_details.pop('target', ''))
test_interval = napalm_base.helpers.convert(int, test_details.pop('test_interval', '0'))
probe_count = napalm_base.helpers.convert(int, test_details.pop('probe_count', '0'))
probe_type = napalm_base.helpers.convert(
py23_compat.text_type, test_details.pop('probe_type', ''))
source = napalm_base.helpers.convert(
py23_compat.text_type, test_details.pop('source_address', ''))
if probe_name not in probes.keys():
probes[probe_name] = {}
probes[probe_name][test_name] = {
'probe_type': probe_type,
'target': target,
'source': source,
'probe_count': probe_count,
'test_interval': test_interval
}
return probes | Return the configuration of the RPM probes. | train | https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L1539-L1572 | null | class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
    """
    Initialise JunOS driver.

    Optional args:
        * config_lock (True/False): lock configuration DB after the connection is established.
        * port (int): custom port
        * key_file (string): SSH key file path
        * keepalive (int): Keepalive interval
        * ignore_warning (boolean): not generate warning exceptions
    """
    self.hostname = hostname
    self.username = username
    self.password = password
    self.timeout = timeout
    # True when the loaded candidate should replace (not merge into) the config
    self.config_replace = False
    # tracks whether we currently hold the configuration DB lock
    self.locked = False
    # Get optional arguments
    if optional_args is None:
        optional_args = {}
    self.config_lock = optional_args.get('config_lock', False)
    self.port = optional_args.get('port', 22)
    self.key_file = optional_args.get('key_file', None)
    self.keepalive = optional_args.get('keepalive', 30)
    self.ssh_config_file = optional_args.get('ssh_config_file', None)
    self.ignore_warning = optional_args.get('ignore_warning', False)
    # key-based auth requires a different Device constructor signature
    if self.key_file:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             ssh_private_key_file=self.key_file,
                             ssh_config=self.ssh_config_file,
                             port=self.port)
    else:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             port=self.port,
                             ssh_config=self.ssh_config_file)
    self.profile = ["junos"]
def open(self):
    """Open the connection with the device.

    :raises ConnectionException: when the SSH/NETCONF session cannot be
        established within the connection timeout.
    """
    try:
        self.device.open()
    except ConnectTimeoutError as cte:
        raise ConnectionException(cte.message)
    self.device.timeout = self.timeout
    self.device._conn._session.transport.set_keepalive(self.keepalive)
    if hasattr(self.device, "cu"):
        # make sure to remove the cu attr from previous session
        # ValueError: requested attribute name cu already exists
        del self.device.cu
    # bind the PyEZ Config utility as device.cu
    self.device.bind(cu=Config)
    if self.config_lock:
        self._lock()
def close(self):
    """Release the configuration lock (when held) and close the session."""
    if self.config_lock:
        self._unlock()
    self.device.close()
def _lock(self):
    """Acquire an exclusive lock on the candidate config DB (idempotent)."""
    if self.locked:
        return
    self.device.cu.lock()
    self.locked = True
def _unlock(self):
    """Release the candidate config DB lock (no-op when not held)."""
    if not self.locked:
        return
    self.device.cu.unlock()
    self.locked = False
def _rpc(self, get, child=None, **kwargs):
    """
    Construct and execute an arbitrary RPC and return its XML reply as bytes.

    Examples:
        Configuration:          get="<get-configuration/>"
        Interface information:  get="<get-interface-information/>"
        A particular interface:
            get="<get-interface-information/>"
            child="<interface-name>ge-0/0/0</interface-name>"
    """
    request = etree.fromstring(get)
    if child:
        request.append(etree.fromstring(child))
    reply = self.device.execute(request)
    return etree.tostring(reply)
def is_alive(self):
    """Report whether both the SSH transport and the NETCONF session are up."""
    transport_active = self.device._conn._session.transport.is_active()
    return {
        'is_alive': transport_active and self.device.connected
    }
@staticmethod
def _is_json_format(config):
    """Return True when *config* parses as JSON, False otherwise."""
    try:
        json.loads(config)  # result discarded; only parseability matters
    except (TypeError, ValueError):
        return False
    else:
        return True
def _detect_config_format(self, config):
    """Guess the payload format of *config*: 'xml', 'set', 'json' or 'text'."""
    # first token of a set-style config is always one of these verbs
    set_actions = frozenset([
        'set',
        'activate',
        'deactivate',
        'annotate',
        'copy',
        'delete',
        'insert',
        'protect',
        'rename',
        'unprotect',
    ])
    stripped = config.strip()
    if stripped.startswith('<'):
        return 'xml'
    if stripped.split(' ')[0] in set_actions:
        return 'set'
    if self._is_json_format(config):
        return 'json'
    return 'text'
def _load_candidate(self, filename, config, overwrite):
    """Load a candidate configuration from *filename* or *config* string.

    :param overwrite: True to replace the full config, False to merge.
    :raises ReplaceConfigException / MergeConfigException: on load errors,
        depending on the current operation mode.
    """
    if filename is None:
        configuration = config
    else:
        with open(filename) as f:
            configuration = f.read()
    if not self.config_lock:
        # if not locked during connection time
        # will try to lock it if not already acquired
        self._lock()
        # and the device will be locked till first commit/rollback
    try:
        fmt = self._detect_config_format(configuration)
        if fmt == "xml":
            configuration = etree.XML(configuration)
        self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                            ignore_warning=self.ignore_warning)
    except ConfigLoadError as e:
        if self.config_replace:
            raise ReplaceConfigException(e.errs)
        else:
            raise MergeConfigException(e.errs)
def load_replace_candidate(self, filename=None, config=None):
    """Load a candidate config that will fully replace the running config on commit."""
    self.config_replace = True
    self._load_candidate(filename, config, True)
def load_merge_candidate(self, filename=None, config=None):
    """Load a candidate config that will be merged into the running config on commit."""
    self.config_replace = False
    self._load_candidate(filename, config, False)
def compare_config(self):
    """Return the diff between candidate and running config ('' when identical)."""
    diff = self.device.cu.diff()
    return '' if diff is None else diff.strip()
def commit_config(self):
    """Commit the candidate configuration."""
    self.device.cu.commit(ignore_warning=self.ignore_warning)
    if not self.config_lock:
        # the lock was taken implicitly at load time; release it now
        self._unlock()
def discard_config(self):
    """Drop the candidate changes by rolling back to the active config (rollback 0)."""
    self.device.cu.rollback(rb_id=0)
    if not self.config_lock:
        # the lock was taken implicitly at load time; release it now
        self._unlock()
def rollback(self):
    """Roll the configuration back to the previous commit and apply it."""
    self.device.cu.rollback(rb_id=1)
    self.commit_config()
def get_facts(self):
    """Return general device facts (vendor, model, version, uptime, interfaces)."""
    facts = self.device.facts
    uptime = self.device.uptime or -1
    iface_table = junos_views.junos_iface_table(self.device)
    iface_table.get()
    return {
        'vendor': u'Juniper',
        'model': py23_compat.text_type(facts['model']),
        'serial_number': py23_compat.text_type(facts['serialnumber']),
        'os_version': py23_compat.text_type(facts['version']),
        'hostname': py23_compat.text_type(facts['hostname']),
        'fqdn': py23_compat.text_type(facts['fqdn']),
        'uptime': uptime,
        'interface_list': iface_table.keys()
    }
def get_interfaces(self):
    """Return interface details, keyed by interface name.

    Speed is normalised to Mbps; -1 when it cannot be parsed.
    """
    result = {}
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    # convert all the tuples to our pre-defined dict structure
    for iface in interfaces.keys():
        result[iface] = {
            'is_up': interfaces[iface]['is_up'],
            'is_enabled': interfaces[iface]['is_enabled'],
            'description': (interfaces[iface]['description'] or u''),
            'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
            'mac_address': napalm_base.helpers.convert(
                napalm_base.helpers.mac,
                interfaces[iface]['mac_address'],
                py23_compat.text_type(interfaces[iface]['mac_address'])),
            'speed': -1
        }
        # result[iface]['last_flapped'] = float(result[iface]['last_flapped'])
        # speed comes back as e.g. "1000mbps" / "10Gbps"; extract value + unit
        match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
        if match is None:
            continue
        speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
        if speed_value == -1:
            continue
        speed_unit = match.group(2)
        if speed_unit.lower() == 'gbps':
            speed_value *= 1000
        result[iface]['speed'] = speed_value
    return result
def get_interfaces_counters(self):
    """Return per-interface traffic/error counters (missing values become -1)."""
    counter_table = junos_views.junos_iface_counter_table(self.device)
    counter_table.get()
    interface_counters = {}
    for interface, counters in counter_table.items():
        interface_counters[interface] = {
            name: (value if value is not None else -1)
            for name, value in counters
        }
    return interface_counters
def get_environment(self):
    """Return environment details: power, fans, temperature, cpu and memory."""
    environment = junos_views.junos_enviroment_table(self.device)
    routing_engine = junos_views.junos_routing_engine_table(self.device)
    temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
    power_supplies = junos_views.junos_pem_table(self.device)
    environment.get()
    routing_engine.get()
    temperature_thresholds.get()
    environment_data = {}
    current_class = None
    for sensor_object, object_data in environment.items():
        structured_object_data = {k: v for k, v in object_data}
        if structured_object_data['class']:
            # If current object has a 'class' defined, store it for use
            # on subsequent unlabeled lines.
            current_class = structured_object_data['class']
        else:
            # Juniper doesn't label the 2nd+ lines of a given class with a
            # class name. In that case, we use the most recent class seen.
            structured_object_data['class'] = current_class
        if structured_object_data['class'] == 'Power':
            # Create a dict for the 'power' key
            try:
                environment_data['power'][sensor_object] = {}
            except KeyError:
                environment_data['power'] = {}
                environment_data['power'][sensor_object] = {}
            environment_data['power'][sensor_object]['capacity'] = -1.0
            environment_data['power'][sensor_object]['output'] = -1.0
        if structured_object_data['class'] == 'Fans':
            # Create a dict for the 'fans' key
            try:
                environment_data['fans'][sensor_object] = {}
            except KeyError:
                environment_data['fans'] = {}
                environment_data['fans'][sensor_object] = {}
        status = structured_object_data['status']
        env_class = structured_object_data['class']
        if (status == 'OK' and env_class == 'Power'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['power'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Power'):
            environment_data['power'][sensor_object]['status'] = False
        elif (status == 'OK' and env_class == 'Fans'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['fans'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Fans'):
            environment_data['fans'][sensor_object]['status'] = False
        # NOTE(review): this inner loop re-scans every temperature threshold
        # for every sensor object (O(n*m)); appears intentional but worth
        # confirming against the table shapes.
        for temperature_object, temperature_data in temperature_thresholds.items():
            structured_temperature_data = {k: v for k, v in temperature_data}
            if structured_object_data['class'] == 'Temp':
                # Create a dict for the 'temperature' key
                try:
                    environment_data['temperature'][sensor_object] = {}
                except KeyError:
                    environment_data['temperature'] = {}
                    environment_data['temperature'][sensor_object] = {}
                # Check we have a temperature field in this class (See #66)
                if structured_object_data['temperature']:
                    environment_data['temperature'][sensor_object]['temperature'] = \
                        float(structured_object_data['temperature'])
                # Set a default value (False) to the key is_critical and is_alert
                environment_data['temperature'][sensor_object]['is_alert'] = False
                environment_data['temperature'][sensor_object]['is_critical'] = False
                # Check if the working temperature is equal to or higher than alerting threshold
                temp = structured_object_data['temperature']
                if structured_temperature_data['red-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_critical'] = True
                    environment_data['temperature'][sensor_object]['is_alert'] = True
                elif structured_temperature_data['yellow-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_alert'] = True
    # Try to correct Power Supply information
    pem_table = dict()
    try:
        power_supplies.get()
    except RpcError:
        # Not all platforms have support for this
        pass
    else:
        # Format PEM information and correct capacity and output values
        for pem in power_supplies.items():
            pem_name = pem[0].replace("PEM", "Power Supply")
            pem_table[pem_name] = dict(pem[1])
            environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
            environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
    for routing_engine_object, routing_engine_data in routing_engine.items():
        structured_routing_engine_data = {k: v for k, v in routing_engine_data}
        # Create dicts for 'cpu' and 'memory'.
        try:
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        except KeyError:
            environment_data['cpu'] = {}
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        # Calculate the CPU usage by using the CPU idle value.
        environment_data['cpu'][routing_engine_object]['%usage'] = \
            100.0 - structured_routing_engine_data['cpu-idle']
        try:
            environment_data['memory']['available_ram'] = \
                int(structured_routing_engine_data['memory-dram-size'])
        except ValueError:
            # value can be e.g. "2048 MB"; keep only the digits
            environment_data['memory']['available_ram'] = \
                int(
                    ''.join(
                        i for i in structured_routing_engine_data['memory-dram-size']
                        if i.isdigit()
                    )
                )
        # Junos gives us RAM in %, so calculation has to be made.
        # Sadly, because of this, results are not 100% accurate to the truth.
        environment_data['memory']['used_ram'] = \
            int(round(environment_data['memory']['available_ram'] / 100.0 *
                structured_routing_engine_data['memory-buffer-utilization']))
    return environment_data
@staticmethod
def _get_address_family(table):
"""
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
"""
address_family_mapping = {
'inet': 'ipv4',
'inet6': 'ipv6',
'inetflow': 'flow'
}
family = table.split('.')[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = family
return address_family
def _parse_route_stats(self, neighbor):
data = {
'ipv4': {
'received_prefixes': -1,
'accepted_prefixes': -1,
'sent_prefixes': -1
},
'ipv6': {
'received_prefixes': -1,
'accepted_prefixes': -1,
'sent_prefixes': -1
}
}
if not neighbor['is_up']:
return data
elif isinstance(neighbor['tables'], list):
if isinstance(neighbor['sent_prefixes'], int):
# We expect sent_prefixes to be a list, but sometimes it
# is of type int. Therefore convert attribute to list
neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
for idx, table in enumerate(neighbor['tables']):
family = self._get_address_family(table)
data[family] = {}
data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
if 'in sync' in neighbor['send-state'][idx]:
data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
else:
data[family]['sent_prefixes'] = 0
else:
family = self._get_address_family(neighbor['tables'])
data[family] = {}
data[family]['received_prefixes'] = neighbor['received_prefixes']
data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
data[family]['sent_prefixes'] = neighbor['sent_prefixes']
return data
@staticmethod
def _parse_value(value):
if isinstance(value, py23_compat.string_types):
return py23_compat.text_type(value)
elif value is None:
return u''
else:
return value
    def get_bgp_neighbors(self):
        """
        Return BGP neighbors details.

        The result is keyed by routing-instance name ('global' for master),
        each entry carrying the instance ``router_id`` and a ``peers`` dict
        keyed by peer IP. Instances with no BGP peers are filtered out of
        the final result.
        """
        bgp_neighbor_data = {}
        # Template for a peer entry; copied per neighbor and overlaid with
        # whatever the device actually returned.
        default_neighbor_details = {
            'local_as': 0,
            'remote_as': 0,
            'remote_id': '',
            'is_up': False,
            'is_enabled': False,
            'description': '',
            'uptime': 0,
            'address_family': {}
        }
        keys = default_neighbor_details.keys()
        uptime_table = junos_views.junos_bgp_uptime_table(self.device)
        bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
        # Cache of uptime RPC results, one entry per routing instance, so the
        # same instance is never queried twice.
        uptime_table_lookup = {}

        def _get_uptime_table(instance):
            # Memoised fetch of the per-instance uptime table.
            if instance not in uptime_table_lookup:
                uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
            return uptime_table_lookup[instance]

        def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
            '''
            Make sure to execute a simple request whenever using
            junos > 13. This is a helper used to avoid code redundancy
            and reuse the function also when iterating through the list
            BGP neighbors under a specific routing instance,
            also when the device is capable to return the routing
            instance name at the BGP neighbor level.
            '''
            for bgp_neighbor in neighbor_data:
                # Key looks like 'a.b.c.d+port'; strip the port suffix.
                peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
                neighbor_details = deepcopy(default_neighbor_details)
                neighbor_details.update(
                    {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
                )
                if not instance:
                    # not instance, means newer Junos version,
                    # as we request everything in a single request
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                    instance = peer_fwd_rti
                else:
                    # instance is explicitly requests,
                    # thus it's an old Junos, so we retrieve the BGP neighbors
                    # under a certain routing instance
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
                instance_name = 'global' if instance == 'master' else instance
                if instance_name not in bgp_neighbor_data:
                    bgp_neighbor_data[instance_name] = {}
                if 'router_id' not in bgp_neighbor_data[instance_name]:
                    # we only need to set this once
                    bgp_neighbor_data[instance_name]['router_id'] = \
                        py23_compat.text_type(neighbor_details.get('local_id', ''))
                # Keep only the fields NAPALM publishes, normalised to text.
                peer = {
                    key: self._parse_value(value)
                    for key, value in neighbor_details.items()
                    if key in keys
                }
                peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
                peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
                peer['address_family'] = self._parse_route_stats(neighbor_details)
                if 'peers' not in bgp_neighbor_data[instance_name]:
                    bgp_neighbor_data[instance_name]['peers'] = {}
                bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
                if not uptime_table_items:
                    uptime_table_items = _get_uptime_table(instance)
                # Overlay uptimes; peers known only to the uptime table get a
                # stub entry carrying just 'uptime'.
                for neighbor, uptime in uptime_table_items:
                    if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                        bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                    bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]

        # Commenting out the following sections, till Junos
        # will provide a way to identify the routing instance name
        # from the details of the BGP neighbor
        # currently, there are Junos 15 version having a field called `peer_fwd_rti`
        # but unfortunately, this is not consistent.
        # Junos 17 might have this fixed, but this needs to be revisited later.
        # In the definition below, `old_junos` means a version that does not provide
        # the forwarding RTI information.
        #
        # old_junos = napalm_base.helpers.convert(
        #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15

        # if old_junos:
        instances = junos_views.junos_route_instance_table(self.device).get()
        for instance, instance_data in instances.items():
            if instance.startswith('__'):
                # junos internal instances
                continue
            bgp_neighbor_data[instance] = {'peers': {}}
            instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
            uptime_table_items = uptime_table.get(instance=instance).items()
            _get_bgp_neighbors_core(instance_neighbors,
                                    instance=instance,
                                    uptime_table_items=uptime_table_items)
        # If the OS provides the `peer_fwd_rti` or any way to identify the
        # rotuing instance name (see above), the performances of this getter
        # can be significantly improved, as we won't execute one request
        # for each an every RT.
        # However, this improvement would only be beneficial for multi-VRF envs.
        #
        # else:
        #     instance_neighbors = bgp_neighbors_table.get().items()
        #     _get_bgp_neighbors_core(instance_neighbors)
        # Drop instances that ended up with no peers at all.
        bgp_tmp_dict = {}
        for k, v in bgp_neighbor_data.items():
            if bgp_neighbor_data[k]['peers']:
                bgp_tmp_dict[k] = v
        return bgp_tmp_dict
def get_lldp_neighbors(self):
"""Return LLDP neighbors details."""
lldp = junos_views.junos_lldp_table(self.device)
try:
lldp.get()
except RpcError as rpcerr:
# this assumes the library runs in an environment
# able to handle logs
# otherwise, the user just won't see this happening
log.error('Unable to retrieve the LLDP neighbors information:')
log.error(rpcerr.message)
return {}
result = lldp.items()
neighbors = {}
for neigh in result:
if neigh[0] not in neighbors.keys():
neighbors[neigh[0]] = []
neighbors[neigh[0]].append({x[0]: py23_compat.text_type(x[1]) for x in neigh[1]})
return neighbors
def get_lldp_neighbors_detail(self, interface=''):
"""Detailed view of the LLDP neighbors."""
lldp_neighbors = {}
lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
try:
lldp_table.get()
except RpcError as rpcerr:
# this assumes the library runs in an environment
# able to handle logs
# otherwise, the user just won't see this happening
log.error('Unable to retrieve the LLDP neighbors information:')
log.error(rpcerr.message)
return {}
interfaces = lldp_table.get().keys()
# get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
# and SRX Series is get-lldp-interface-neighbors-information,
# and rpc for M, MX, and T Series is get-lldp-interface-neighbors
# ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
# ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
for interface in interfaces:
if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
lldp_table.get(interface_name=interface)
else:
lldp_table.get(interface_device=interface)
for item in lldp_table:
if interface not in lldp_neighbors.keys():
lldp_neighbors[interface] = []
lldp_neighbors[interface].append({
'parent_interface': item.parent_interface,
'remote_port': item.remote_port,
'remote_chassis_id': napalm_base.helpers.convert(
napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
'remote_port_description': napalm_base.helpers.convert(
py23_compat.text_type, item.remote_port_description),
'remote_system_name': item.remote_system_name,
'remote_system_description': item.remote_system_description,
'remote_system_capab': item.remote_system_capab,
'remote_system_enable_capab': item.remote_system_enable_capab
})
return lldp_neighbors
    def cli(self, commands):
        """
        Execute raw CLI commands and returns their output.

        Pipe modifiers that Junos refuses over the API (currently ``save``)
        are stripped before sending; the emulation helpers below can
        reproduce the common pipe filters locally.

        :param commands: list of CLI command strings (pipes allowed)
        :returns: dict mapping each original command string to its output
        :raises TypeError: if ``commands`` is not a list
        """
        cli_output = {}

        def _count(txt, none):  # Second arg for consistency only. noqa
            '''
            Return the exact output, as Junos displays
            e.g.:
            > show system processes extensive | match root | count
            Count: 113 lines
            '''
            count = len(txt.splitlines())
            return 'Count: {count} lines'.format(count=count)

        def _trim(txt, length):
            '''
            Trim specified number of columns from start of line.
            '''
            try:
                newlines = []
                for line in txt.splitlines():
                    newlines.append(line[int(length):])
                return '\n'.join(newlines)
            except ValueError:
                # non-numeric length: return the text unchanged
                return txt

        def _except(txt, pattern):
            '''
            Show only text that does not match a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            unmatched = [
                line for line in txt.splitlines()
                if not re.search(rgx, line, re.I)
            ]
            return '\n'.join(unmatched)

        def _last(txt, length):
            '''
            Display end of output only.
            '''
            try:
                return '\n'.join(
                    txt.splitlines()[(-1)*int(length):]
                )
            except ValueError:
                # non-numeric length: return the text unchanged
                return txt

        def _match(txt, pattern):
            '''
            Show only text that matches a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            matched = [
                line for line in txt.splitlines()
                if re.search(rgx, line, re.I)
            ]
            return '\n'.join(matched)

        def _find(txt, pattern):
            '''
            Search for first occurrence of pattern.
            '''
            rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
            match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
            if match:
                return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
            else:
                return '\nPattern not found'

        def _process_pipe(cmd, txt):
            '''
            Process CLI output from Juniper device that
            doesn't allow piping the output.
            '''
            # NOTE(review): when the device already returned text, the pipe
            # emulation is skipped entirely — presumably the device applied
            # the (non-blacklisted) pipes itself; confirm against callers.
            if txt is not None:
                return txt
            _OF_MAP = OrderedDict()
            _OF_MAP['except'] = _except
            _OF_MAP['match'] = _match
            _OF_MAP['last'] = _last
            _OF_MAP['trim'] = _trim
            _OF_MAP['count'] = _count
            _OF_MAP['find'] = _find
            # the operations order matter in this case!
            exploded_cmd = cmd.split('|')
            pipe_oper_args = {}
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0]  # always there
                pipe_args = ''.join(exploded_pipe[1:2])
                # will not throw error when there's no arg
                pipe_oper_args[pipe_oper] = pipe_args
            for oper in _OF_MAP.keys():
                # to make sure the operation sequence is correct
                if oper not in pipe_oper_args.keys():
                    continue
                txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
            return txt

        if not isinstance(commands, list):
            raise TypeError('Please enter a valid list of commands!')
        _PIPE_BLACKLIST = ['save']
        # Preprocessing to avoid forbidden commands
        for command in commands:
            exploded_cmd = command.split('|')
            command_safe_parts = []
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0]  # always there
                if pipe_oper in _PIPE_BLACKLIST:
                    continue
                pipe_args = ''.join(exploded_pipe[1:2])
                safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                                  args=pipe_args)
                command_safe_parts.append(safe_pipe)
            # Rebuild the command with only the pipes Junos will accept.
            safe_command = exploded_cmd[0] if not command_safe_parts else\
                '{base} | {pipes}'.format(base=exploded_cmd[0],
                                          pipes=' | '.join(command_safe_parts))
            raw_txt = self.device.cli(safe_command, warning=False)
            # The output is keyed by the ORIGINAL command, pipes included.
            cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
                _process_pipe(command, raw_txt))
        return cli_output
    def get_bgp_config(self, group='', neighbor=''):
        """
        Return BGP configuration.

        :param group: optionally restrict the output to one BGP group
        :param neighbor: optionally restrict to one neighbor (only honoured
            when ``group`` is also given — see below)
        :returns: dict keyed by group name; each group carries its options,
            a nested ``prefix_limit`` tree and a ``neighbors`` dict
        """
        def update_dict(d, u): # for deep dictionary update
            # NOTE(review): collections.Mapping moved to collections.abc in
            # py3.3 and was removed in py3.10 — confirm the supported Python
            # range before upgrading.
            for k, v in u.items():
                if isinstance(d, collections.Mapping):
                    if isinstance(v, collections.Mapping):
                        r = update_dict(d.get(k, {}), v)
                        d[k] = r
                    else:
                        d[k] = u[k]
                else:
                    d = {k: u[k]}
            return d

        def build_prefix_limit(**args):
            """
            Transform the elements of a flat dictionary into nested dictionaries.

            Example:
                {
                    'inet_unicast_limit': 500,
                    'inet_unicast_teardown_threshold': 95,
                    'inet_unicast_teardown_timeout': 5
                }
            becomes:
                {
                    'inet': {
                        'unicast': {
                            'limit': 500,
                            'teardown': {
                                'threshold': 95,
                                'timeout': 5
                            }
                        }
                    }
                }
            """
            prefix_limit = {}
            for key, value in args.items():
                key_levels = key.split('_')
                length = len(key_levels)-1
                temp_dict = {
                    key_levels[length]: value
                }
                # Wrap the innermost value one level per remaining token,
                # from right to left.
                for index in reversed(range(length)):
                    level = key_levels[index]
                    temp_dict = {level: temp_dict}
                update_dict(prefix_limit, temp_dict)
            return prefix_limit

        # Field name -> expected datatype, shared by groups and peers.
        _COMMON_FIELDS_DATATYPE_ = {
            'description': py23_compat.text_type,
            'local_address': py23_compat.text_type,
            'local_as': int,
            'remote_as': int,
            'import_policy': py23_compat.text_type,
            'export_policy': py23_compat.text_type,
            'inet_unicast_limit_prefix_limit': int,
            'inet_unicast_teardown_threshold_prefix_limit': int,
            'inet_unicast_teardown_timeout_prefix_limit': int,
            'inet_unicast_novalidate_prefix_limit': int,
            'inet_flow_limit_prefix_limit': int,
            'inet_flow_teardown_threshold_prefix_limit': int,
            'inet_flow_teardown_timeout_prefix_limit': int,
            'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
            'inet6_unicast_limit_prefix_limit': int,
            'inet6_unicast_teardown_threshold_prefix_limit': int,
            'inet6_unicast_teardown_timeout_prefix_limit': int,
            'inet6_unicast_novalidate_prefix_limit': int,
            'inet6_flow_limit_prefix_limit': int,
            'inet6_flow_teardown_threshold_prefix_limit': int,
            'inet6_flow_teardown_timeout_prefix_limit': int,
            'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
        }

        # Peer-only fields.
        _PEER_FIELDS_DATATYPE_MAP_ = {
            'authentication_key': py23_compat.text_type,
            'route_reflector_client': bool,
            'nhs': bool
        }
        _PEER_FIELDS_DATATYPE_MAP_.update(
            _COMMON_FIELDS_DATATYPE_
        )

        # Group-only fields.
        _GROUP_FIELDS_DATATYPE_MAP_ = {
            'type': py23_compat.text_type,
            'apply_groups': list,
            'remove_private_as': bool,
            'multipath': bool,
            'multihop_ttl': int
        }
        _GROUP_FIELDS_DATATYPE_MAP_.update(
            _COMMON_FIELDS_DATATYPE_
        )

        # Neutral default per datatype, used when a field is absent.
        _DATATYPE_DEFAULT_ = {
            py23_compat.text_type: '',
            int: 0,
            bool: False,
            list: []
        }

        bgp_config = {}

        if group:
            bgp = junos_views.junos_bgp_config_group_table(self.device)
            bgp.get(group=group)
        else:
            bgp = junos_views.junos_bgp_config_table(self.device)
            bgp.get()
            neighbor = ''  # if no group is set, no neighbor should be set either
        bgp_items = bgp.items()

        if neighbor:
            neighbor_ip = napalm_base.helpers.ip(neighbor)

        for bgp_group in bgp_items:
            bgp_group_name = bgp_group[0]
            bgp_group_details = bgp_group[1]
            # Seed the group with datatype-appropriate defaults for every
            # non-prefix-limit field.
            bgp_config[bgp_group_name] = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            # First pass: plain (non-prefix-limit) fields.
            for elem in bgp_group_details:
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    # Multiple policies are flattened into one space-separated
                    # string.
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                if key == 'neighbors':
                    # Stash the nested neighbors table; it is processed below.
                    # NOTE(review): if the device returns no 'neighbors' field,
                    # bgp_group_peers stays unbound for the loop below — confirm
                    # the table view always emits it.
                    bgp_group_peers = value
                    continue
                bgp_config[bgp_group_name].update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
            # Second pass: prefix-limit fields, folded into a nested tree.
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            if 'multihop' in bgp_config[bgp_group_name].keys():
                # Delete 'multihop' key from the output
                del bgp_config[bgp_group_name]['multihop']
                if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                    # Set ttl to default value 64
                    bgp_config[bgp_group_name]['multihop_ttl'] = 64

            bgp_config[bgp_group_name]['neighbors'] = {}
            for bgp_group_neighbor in bgp_group_peers.items():
                bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
                # NOTE(review): this compares against the raw `neighbor` string
                # while the break below compares against the normalised
                # `neighbor_ip` — confirm both are intended.
                if neighbor and bgp_peer_address != neighbor:
                    continue  # if filters applied, jump over all other neighbors
                bgp_group_details = bgp_group_neighbor[1]
                # Seed the peer with datatype-appropriate defaults.
                bgp_peer_details = {
                    field: _DATATYPE_DEFAULT_.get(datatype)
                    for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                    if '_prefix_limit' not in field
                }
                for elem in bgp_group_details:
                    if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                        continue
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    key = elem[0]
                    value = elem[1]
                    if key in ['export_policy', 'import_policy']:
                        if isinstance(value, list):
                            value = ' '.join(value)
                    if key == 'local_address':
                        value = napalm_base.helpers.convert(
                            napalm_base.helpers.ip, value, value)
                    bgp_peer_details.update({
                        key: napalm_base.helpers.convert(datatype, value, default)
                    })
                    # NOTE(review): the AS normalisation re-runs on every field
                    # iteration; harmless but redundant.
                    bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                        bgp_peer_details['local_as'])
                    bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                        bgp_peer_details['remote_as'])
                    if key == 'cluster':
                        bgp_peer_details['route_reflector_client'] = True
                        # we do not want cluster in the output
                        del bgp_peer_details['cluster']
                # A cluster-id on the group makes every member an RR client.
                if 'cluster' in bgp_config[bgp_group_name].keys():
                    bgp_peer_details['route_reflector_client'] = True
                prefix_limit_fields = {}
                for elem in bgp_group_details:
                    if '_prefix_limit' in elem[0] and elem[1] is not None:
                        datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                        default = _DATATYPE_DEFAULT_.get(datatype)
                        prefix_limit_fields.update({
                            elem[0].replace('_prefix_limit', ''):
                                napalm_base.helpers.convert(datatype, elem[1], default)
                        })
                bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
                bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
                if neighbor and bgp_peer_address == neighbor_ip:
                    break  # found the desired neighbor
            if 'cluster' in bgp_config[bgp_group_name].keys():
                # we do not want cluster in the output
                del bgp_config[bgp_group_name]['cluster']
        return bgp_config
    def get_bgp_neighbors_detail(self, neighbor_address=''):
        """
        Detailed view of the BGP neighbors operational data.

        :param neighbor_address: optionally restrict the output to one peer
        :returns: dict keyed by routing-instance name ('global' for master),
            each value keyed by remote AS number with a list of per-neighbor
            detail dicts.
        """
        bgp_neighbors = {}
        # Template copied per neighbor; -1 marks counters the device did not
        # report.
        default_neighbor_details = {
            'up': False,
            'local_as': 0,
            'remote_as': 0,
            'router_id': u'',
            'local_address': u'',
            'routing_table': u'',
            'local_address_configured': False,
            'local_port': 0,
            'remote_address': u'',
            'remote_port': 0,
            'multihop': False,
            'multipath': False,
            'remove_private_as': False,
            'import_policy': u'',
            'export_policy': u'',
            'input_messages': -1,
            'output_messages': -1,
            'input_updates': -1,
            'output_updates': -1,
            'messages_queued_out': -1,
            'connection_state': u'',
            'previous_connection_state': u'',
            'last_event': u'',
            'suppress_4byte_as': False,
            'local_as_prepend': False,
            'holdtime': 0,
            'configured_holdtime': 0,
            'keepalive': 0,
            'configured_keepalive': 0,
            'active_prefix_count': -1,
            'received_prefix_count': -1,
            'accepted_prefix_count': -1,
            'suppressed_prefix_count': -1,
            'advertised_prefix_count': -1,
            'flap_count': 0
        }
        # Junos "Options:" tokens mapped onto the NAPALM boolean flags.
        OPTION_KEY_MAP = {
            'RemovePrivateAS': 'remove_private_as',
            'Multipath': 'multipath',
            'Multihop': 'multihop',
            'AddressFamily': 'local_address_configured'
            # 'AuthKey' : 'authentication_key_set'
            # but other vendors do not specify if auth key is set
            # other options:
            # Preference, HoldTime, Ttl, LogUpDown, Refresh
        }

        def _bgp_iter_core(neighbor_data, instance=None):
            '''
            Iterate over a list of neighbors.
            For older junos, the routing instance is not specified inside the
            BGP neighbors XML, therefore we need to use a super sub-optimal structure
            as in get_bgp_neighbors: iterate through the list of network instances
            then execute one request for each and every routing instance.
            For newer junos, this is not necessary as the routing instance is available
            and we can get everything solve in a single request.
            '''
            for bgp_neighbor in neighbor_data:
                # Table key is the peer's remote AS number.
                remote_as = int(bgp_neighbor[0])
                neighbor_details = deepcopy(default_neighbor_details)
                neighbor_details.update(
                    {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
                )
                if not instance:
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                    instance = peer_fwd_rti
                else:
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
                instance_name = 'global' if instance == 'master' else instance
                # Translate the space-separated Options string into booleans.
                # NOTE(review): isinstance(options, str) will not match py2
                # unicode strings — confirm against the supported Python range.
                options = neighbor_details.pop('options', '')
                if isinstance(options, str):
                    options_list = options.split()
                    for option in options_list:
                        key = OPTION_KEY_MAP.get(option)
                        if key is not None:
                            neighbor_details[key] = True
                four_byte_as = neighbor_details.pop('4byte_as', 0)
                # Addresses come as 'ip+port'; split out the optional port
                # (179 when absent).
                local_address = neighbor_details.pop('local_address', '')
                local_details = local_address.split('+')
                neighbor_details['local_address'] = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, local_details[0], local_details[0])
                if len(local_details) == 2:
                    neighbor_details['local_port'] = int(local_details[1])
                else:
                    neighbor_details['local_port'] = 179
                neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
                peer_address = neighbor_details.pop('peer_address', '')
                remote_details = peer_address.split('+')
                neighbor_details['remote_address'] = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, remote_details[0], remote_details[0])
                if len(remote_details) == 2:
                    neighbor_details['remote_port'] = int(remote_details[1])
                else:
                    neighbor_details['remote_port'] = 179
                neighbor_details['routing_table'] = instance_name
                neighbor_details['local_as'] = napalm_base.helpers.as_number(
                    neighbor_details['local_as'])
                neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                    neighbor_details['remote_as'])
                neighbors_rib = neighbor_details.pop('rib')
                neighbors_queue = neighbor_details.pop('queue')
                # Sum the queued-out counters across all queues.
                messages_queued_out = 0
                for queue_entry in neighbors_queue.items():
                    messages_queued_out += queue_entry[1][0][1]
                neighbor_details['messages_queued_out'] = messages_queued_out
                if instance_name not in bgp_neighbors.keys():
                    bgp_neighbors[instance_name] = {}
                if remote_as not in bgp_neighbors[instance_name].keys():
                    bgp_neighbors[instance_name][remote_as] = []
                neighbor_rib_stats = neighbors_rib.items()
                if not neighbor_rib_stats:
                    bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                    continue  # no RIBs available, pass default details
                # Aggregate prefix counters across every RIB the peer carries.
                neighbor_rib_details = {
                    'active_prefix_count': 0,
                    'received_prefix_count': 0,
                    'accepted_prefix_count': 0,
                    'suppressed_prefix_count': 0,
                    'advertised_prefix_count': 0
                }
                for rib_entry in neighbor_rib_stats:
                    for elem in rib_entry[1]:
                        if elem[1] is None:
                            neighbor_rib_details[elem[0]] += 0
                        else:
                            neighbor_rib_details[elem[0]] += elem[1]
                neighbor_details.update(neighbor_rib_details)
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)

        # old_junos = napalm_base.helpers.convert(
        #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
        bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
        # if old_junos:
        # One request per routing instance (see docstring of _bgp_iter_core).
        instances = junos_views.junos_route_instance_table(self.device)
        for instance, instance_data in instances.get().items():
            if instance.startswith('__'):
                # junos internal instances
                continue
            neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                    neighbor_address=str(neighbor_address)).items()
            _bgp_iter_core(neighbor_data, instance=instance)
        # else:
        #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
        #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
        #     _bgp_iter_core(neighbor_data)
        return bgp_neighbors
def get_arp_table(self):
"""Return the ARP table."""
# could use ArpTable
# from jnpr.junos.op.phyport import ArpTable
# and simply use it
# but
# we need:
# - filters
# - group by VLAN ID
# - hostname & TTE fields as well
arp_table = []
arp_table_raw = junos_views.junos_arp_table(self.device)
arp_table_raw.get()
arp_table_items = arp_table_raw.items()
for arp_table_entry in arp_table_items:
arp_entry = {
elem[0]: elem[1] for elem in arp_table_entry[1]
}
arp_entry['mac'] = napalm_base.helpers.mac(arp_entry.get('mac'))
arp_entry['ip'] = napalm_base.helpers.ip(arp_entry.get('ip'))
arp_table.append(arp_entry)
return arp_table
def get_ntp_peers(self):
"""Return the NTP peers configured on the device."""
ntp_table = junos_views.junos_ntp_peers_config_table(self.device)
ntp_table.get()
ntp_peers = ntp_table.items()
if not ntp_peers:
return {}
return {napalm_base.helpers.ip(peer[0]): {} for peer in ntp_peers}
def get_ntp_servers(self):
"""Return the NTP servers configured on the device."""
ntp_table = junos_views.junos_ntp_servers_config_table(self.device)
ntp_table.get()
ntp_servers = ntp_table.items()
if not ntp_servers:
return {}
return {napalm_base.helpers.ip(server[0]): {} for server in ntp_servers}
def get_ntp_stats(self):
"""Return NTP stats (associations)."""
# NTP Peers does not have XML RPC defined
# thus we need to retrieve raw text and parse...
# :(
ntp_stats = []
REGEX = (
'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
'\s+([0-9\.]+)\s?$'
)
ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
ntp_assoc_output_lines = ntp_assoc_output.splitlines()
for ntp_assoc_output_line in ntp_assoc_output_lines[3:]: # except last line
line_search = re.search(REGEX, ntp_assoc_output_line, re.I)
if not line_search:
continue # pattern not found
line_groups = line_search.groups()
try:
ntp_stats.append({
'remote': napalm_base.helpers.ip(line_groups[1]),
'synchronized': (line_groups[0] == '*'),
'referenceid': py23_compat.text_type(line_groups[2]),
'stratum': int(line_groups[3]),
'type': py23_compat.text_type(line_groups[4]),
'when': py23_compat.text_type(line_groups[5]),
'hostpoll': int(line_groups[6]),
'reachability': int(line_groups[7]),
'delay': float(line_groups[8]),
'offset': float(line_groups[9]),
'jitter': float(line_groups[10])
})
except Exception:
continue # jump to next line
return ntp_stats
def get_interfaces_ip(self):
"""Return the configured IP addresses."""
interfaces_ip = {}
interface_table = junos_views.junos_ip_interfaces_table(self.device)
interface_table.get()
interface_table_items = interface_table.items()
_FAMILY_VMAP_ = {
'inet': u'ipv4',
'inet6': u'ipv6'
# can add more mappings
}
_FAMILY_MAX_PREFIXLEN = {
'inet': 32,
'inet6': 128
}
for interface_details in interface_table_items:
ip_network = interface_details[0]
ip_address = ip_network.split('/')[0]
address = napalm_base.helpers.convert(
napalm_base.helpers.ip, ip_address, ip_address)
try:
interface_details_dict = dict(interface_details[1])
family_raw = interface_details_dict.get('family')
interface = py23_compat.text_type(interface_details_dict.get('interface'))
except ValueError:
continue
prefix = napalm_base.helpers.convert(int,
ip_network.split('/')[-1],
_FAMILY_MAX_PREFIXLEN.get(family_raw))
family = _FAMILY_VMAP_.get(family_raw)
if not family or not interface:
continue
if interface not in interfaces_ip.keys():
interfaces_ip[interface] = {}
if family not in interfaces_ip[interface].keys():
interfaces_ip[interface][family] = {}
if address not in interfaces_ip[interface][family].keys():
interfaces_ip[interface][family][address] = {}
interfaces_ip[interface][family][address][u'prefix_length'] = prefix
return interfaces_ip
def get_mac_address_table(self):
"""Return the MAC address table."""
mac_address_table = []
if self.device.facts.get('personality', '') in ['SWITCH']: # for EX & QFX devices
if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']: # for L2NG devices
mac_table = junos_views.junos_mac_address_table_switch_l2ng(self.device)
else:
mac_table = junos_views.junos_mac_address_table_switch(self.device)
else:
mac_table = junos_views.junos_mac_address_table(self.device)
mac_table.get()
mac_table_items = mac_table.items()
default_values = {
'mac': u'',
'interface': u'',
'vlan': 0,
'static': False,
'active': True,
'moves': 0,
'last_move': 0.0
}
for mac_table_entry in mac_table_items:
mac_entry = default_values.copy()
mac_entry.update(
{elem[0]: elem[1] for elem in mac_table_entry[1]}
)
mac = mac_entry.get('mac')
# JUNOS returns '*' for Type = Flood
if mac == '*':
continue
mac_entry['mac'] = napalm_base.helpers.mac(mac)
mac_address_table.append(mac_entry)
return mac_address_table
    def get_route_to(self, destination='', protocol=''):
        """
        Return route details to a specific destination, learned from a certain protocol.

        :param destination: destination prefix to look up (required, string)
        :param protocol: optionally filter on the learning protocol
            ('connected' is translated to Junos' 'direct')
        :returns: dict keyed by 'destination/prefix_length', each value a
            list of route dicts with common fields plus
            ``protocol_attributes``
        :raises TypeError: if destination is not a string
        :raises CommandTimeoutException: when the query matches too many routes
        :raises CommandErrorException: on any other RPC failure
        """
        routes = {}

        if not isinstance(destination, py23_compat.string_types):
            raise TypeError('Please specify a valid destination!')

        if protocol and isinstance(destination, py23_compat.string_types):
            protocol = protocol.lower()

        if protocol == 'connected':
            protocol = 'direct'  # this is how is called on JunOS

        _COMMON_PROTOCOL_FIELDS_ = [
            'destination',
            'prefix_length',
            'protocol',
            'current_active',
            'last_active',
            'age',
            'next_hop',
            'outgoing_interface',
            'selected_next_hop',
            'preference',
            'inactive_reason',
            'routing_table'
        ]  # identifies the list of fields common for all protocols

        _BOOLEAN_FIELDS_ = [
            'current_active',
            'selected_next_hop',
            'last_active'
        ]  # fields expected to have boolean values

        # Fields published under 'protocol_attributes', per protocol.
        _PROTOCOL_SPECIFIC_FIELDS_ = {
            'bgp': [
                'local_as',
                'remote_as',
                'as_path',
                'communities',
                'local_preference',
                'preference2',
                'remote_address',
                'metric',
                'metric2'
            ],
            'isis': [
                'level',
                'metric',
                'local_as'
            ]
        }

        routes_table = junos_views.junos_protocol_route_table(self.device)

        rt_kargs = {
            'destination': destination
        }
        if protocol and isinstance(destination, py23_compat.string_types):
            rt_kargs['protocol'] = protocol

        try:
            routes_table.get(**rt_kargs)
        except RpcTimeoutError:
            # on devices with milions of routes
            # in case the destination is too generic (e.g.: 10/8)
            # will take very very long to determine all routes and
            # moreover will return a huge list
            raise CommandTimeoutException(
                'Too many routes returned! Please try with a longer prefix or a specific protocol!'
            )
        except RpcError as rpce:
            if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
                raise CommandErrorException(
                    'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
            raise CommandErrorException(rpce)
        except Exception as err:
            raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))

        routes_items = routes_table.items()

        for route in routes_items:
            d = {}
            # next_hop = route[0]
            d = {elem[0]: elem[1] for elem in route[1]}
            # Rebuild the 'ip/len' key used to group the routes.
            destination = napalm_base.helpers.ip(d.pop('destination', ''))
            prefix_length = d.pop('prefix_length', 32)
            destination = '{d}/{p}'.format(
                d=destination,
                p=prefix_length
            )
            # Missing booleans default to False rather than None.
            d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
            as_path = d.get('as_path')
            if as_path is not None:
                d['as_path'] = as_path.split(' I ')[0]\
                    .replace('AS path:', '')\
                    .replace('I', '')\
                    .strip()
                # to be sure that contains only AS Numbers
            if d.get('inactive_reason') is None:
                d['inactive_reason'] = u''
            route_protocol = d.get('protocol').lower()
            # The table may return routes from other protocols; apply the
            # protocol filter here as well.
            if protocol and protocol != route_protocol:
                continue
            communities = d.get('communities')
            if communities is not None and type(communities) is not list:
                # a single community is still published as a list
                d['communities'] = [communities]
            d_keys = list(d.keys())
            # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
            all_protocol_attributes = {
                key: d.pop(key)
                for key in d_keys
                if key not in _COMMON_PROTOCOL_FIELDS_
            }
            protocol_attributes = {
                key: value for key, value in all_protocol_attributes.items()
                if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
            }
            d['protocol_attributes'] = protocol_attributes
            if destination not in routes.keys():
                routes[destination] = []
            routes[destination].append(d)

        return routes
def get_snmp_information(self):
"""Return the SNMP configuration."""
snmp_information = {}
snmp_config = junos_views.junos_snmp_config_table(self.device)
snmp_config.get()
snmp_items = snmp_config.items()
if not snmp_items:
return snmp_information
snmp_information = {
py23_compat.text_type(ele[0]): ele[1] if ele[1] else ''
for ele in snmp_items[0][1]
}
snmp_information['community'] = {}
communities_table = snmp_information.pop('communities_table')
if not communities_table:
return snmp_information
for community in communities_table.items():
community_name = py23_compat.text_type(community[0])
community_details = {
'acl': ''
}
community_details.update({
py23_compat.text_type(ele[0]): py23_compat.text_type(
ele[1] if ele[0] != 'mode'
else C.SNMP_AUTHORIZATION_MODE_MAP.get(ele[1]))
for ele in community[1]
})
snmp_information['community'][community_name] = community_details
return snmp_information
def get_probes_results(self):
"""Return the results of the RPM probes."""
probes_results = {}
probes_results_table = junos_views.junos_rpm_probes_results_table(self.device)
probes_results_table.get()
probes_results_items = probes_results_table.items()
for probe_result in probes_results_items:
probe_name = py23_compat.text_type(probe_result[0])
test_results = {
p[0]: p[1] for p in probe_result[1]
}
test_results['last_test_loss'] = napalm_base.helpers.convert(
int, test_results.pop('last_test_loss'), 0)
for test_param_name, test_param_value in test_results.items():
if isinstance(test_param_value, float):
test_results[test_param_name] = test_param_value * 1e-3
# convert from useconds to mseconds
test_name = test_results.pop('test_name', '')
source = test_results.get('source', u'')
if source is None:
test_results['source'] = u''
if probe_name not in probes_results.keys():
probes_results[probe_name] = {}
probes_results[probe_name][test_name] = test_results
return probes_results
def traceroute(self,
destination,
source=C.TRACEROUTE_SOURCE,
ttl=C.TRACEROUTE_TTL,
timeout=C.TRACEROUTE_TIMEOUT,
vrf=C.TRACEROUTE_VRF):
"""Execute traceroute and return results."""
traceroute_result = {}
# calling form RPC does not work properly :(
# but defined junos_route_instance_table just in case
source_str = ''
maxttl_str = ''
wait_str = ''
vrf_str = ''
if source:
source_str = ' source {source}'.format(source=source)
if ttl:
maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
if timeout:
wait_str = ' wait {timeout}'.format(timeout=timeout)
if vrf:
vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
destination=destination,
source=source_str,
maxttl=maxttl_str,
wait=wait_str,
vrf=vrf_str
)
traceroute_rpc = E('command', traceroute_command)
rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
# make direct RPC call via NETCONF
traceroute_results = rpc_reply.find('.//traceroute-results')
traceroute_failure = napalm_base.helpers.find_txt(
traceroute_results, 'traceroute-failure', '')
error_message = napalm_base.helpers.find_txt(
traceroute_results, 'rpc-error/error-message', '')
if traceroute_failure and error_message:
return {'error': '{}: {}'.format(traceroute_failure, error_message)}
traceroute_result['success'] = {}
for hop in traceroute_results.findall('hop'):
ttl_value = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
if ttl_value not in traceroute_result['success']:
traceroute_result['success'][ttl_value] = {'probes': {}}
for probe in hop.findall('probe-result'):
probe_index = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
ip_address = napalm_base.helpers.convert(
napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
host_name = py23_compat.text_type(
napalm_base.helpers.find_txt(probe, 'host-name', '*'))
rtt = napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3 # ms
traceroute_result['success'][ttl_value]['probes'][probe_index] = {
'ip_address': ip_address,
'host_name': host_name,
'rtt': rtt
}
return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
ping_dict = {}
source_str = ''
maxttl_str = ''
timeout_str = ''
size_str = ''
count_str = ''
vrf_str = ''
if source:
source_str = ' source {source}'.format(source=source)
if ttl:
maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
if timeout:
timeout_str = ' wait {timeout}'.format(timeout=timeout)
if size:
size_str = ' size {size}'.format(size=size)
if count:
count_str = ' count {count}'.format(count=count)
if vrf:
vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
destination=destination,
source=source_str,
ttl=maxttl_str,
timeout=timeout_str,
size=size_str,
count=count_str,
vrf=vrf_str
)
ping_rpc = E('command', ping_command)
rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
# make direct RPC call via NETCONF
probe_summary = rpc_reply.find('.//probe-results-summary')
if probe_summary is None:
rpc_error = rpc_reply.find('.//rpc-error')
return {'error': '{}'.format(
napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
packet_loss = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
# rtt values are valid only if a we get an ICMP reply
if packet_loss is not 100:
ping_dict['success'] = {}
ping_dict['success']['probes_sent'] = int(
probe_summary.findtext("probes-sent"))
ping_dict['success']['packet_loss'] = packet_loss
ping_dict['success'].update({
'rtt_min': round((napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
'rtt_max': round((napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
'rtt_avg': round((napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_summary, 'rtt-average'), -1) * 1e-3), 3),
'rtt_stddev': round((napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
})
tmp = rpc_reply.find('.//ping-results')
results_array = []
for probe_result in tmp.findall('probe-result'):
ip_address = napalm_base.helpers.convert(
napalm_base.helpers.ip,
napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
rtt = round(
(napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_result, 'rtt'), -1) * 1e-3), 3)
results_array.append({'ip_address': ip_address,
'rtt': rtt})
ping_dict['success'].update({'results': results_array})
else:
return {'error': 'Packet loss {}'.format(packet_loss)}
return ping_dict
def get_users(self):
"""Return the configuration of the users."""
users = {}
_JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = {
'super-user': 15,
'superuser': 15,
'operator': 5,
'read-only': 1,
'unauthorized': 0
}
_DEFAULT_USER_DETAILS = {
'level': 0,
'password': '',
'sshkeys': []
}
users_table = junos_views.junos_users_table(self.device)
users_table.get()
users_items = users_table.items()
for user_entry in users_items:
username = user_entry[0]
user_details = _DEFAULT_USER_DETAILS.copy()
user_details.update({
d[0]: d[1] for d in user_entry[1] if d[1]
})
user_class = user_details.pop('class', '')
user_details = {
key: py23_compat.text_type(user_details[key])
for key in user_details.keys()
}
level = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0)
user_details.update({
'level': level
})
user_details['sshkeys'] = [
user_details.pop(key)
for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
if user_details.get(key, '')
]
users[username] = user_details
return users
def get_optics(self):
"""Return optics information."""
optics_table = junos_views.junos_intf_optics_table(self.device)
optics_table.get()
optics_items = optics_table.items()
# optics_items has no lane information, so we need to re-format data
# inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
# but the information for 40G/100G is incorrect at this point
# Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
optics_items_with_lane = []
for intf_optic_item in optics_items:
temp_list = list(intf_optic_item)
temp_list.insert(1, u"0")
new_intf_optic_item = tuple(temp_list)
optics_items_with_lane.append(new_intf_optic_item)
# Now optics_items_with_lane has all optics with lane 0 included
# Example: ('xe-0/0/0', u'0', [ optical_values ])
# Get optical information for 40G/100G optics
optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
optics_table40G.get()
optics_40Gitems = optics_table40G.items()
# Re-format data as before inserting lane value
new_optics_40Gitems = []
for item in optics_40Gitems:
lane = item[0]
iface = item[1].pop(0)
new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
# New_optics_40Gitems contains 40G/100G optics only:
# ('et-0/0/49', u'0', [ optical_values ]),
# ('et-0/0/49', u'1', [ optical_values ]),
# ('et-0/0/49', u'2', [ optical_values ])
# Remove 40G/100G optics entries with wrong information returned
# from junos_intf_optics_table()
iface_40G = [item[0] for item in new_optics_40Gitems]
for intf_optic_item in optics_items_with_lane:
iface_name = intf_optic_item[0]
if iface_name not in iface_40G:
new_optics_40Gitems.append(intf_optic_item)
# New_optics_40Gitems contains all optics 10G/40G/100G with the lane
optics_detail = {}
for intf_optic_item in new_optics_40Gitems:
lane = intf_optic_item[1]
interface_name = py23_compat.text_type(intf_optic_item[0])
optics = dict(intf_optic_item[2])
if interface_name not in optics_detail:
optics_detail[interface_name] = {}
optics_detail[interface_name]['physical_channels'] = {}
optics_detail[interface_name]['physical_channels']['channel'] = []
# Defaulting avg, min, max values to 0.0 since device does not
# return these values
intf_optics = {
'index': int(lane),
'state': {
'input_power': {
'instant': (
float(optics['input_power'])
if optics['input_power'] not in
[None, C.OPTICS_NULL_LEVEL]
else 0.0),
'avg': 0.0,
'max': 0.0,
'min': 0.0
},
'output_power': {
'instant': (
float(optics['output_power'])
if optics['output_power'] not in
[None, C.OPTICS_NULL_LEVEL]
else 0.0),
'avg': 0.0,
'max': 0.0,
'min': 0.0
},
'laser_bias_current': {
'instant': (
float(optics['laser_bias_current'])
if optics['laser_bias_current'] not in
[None, C.OPTICS_NULL_LEVEL]
else 0.0),
'avg': 0.0,
'max': 0.0,
'min': 0.0
}
}
}
optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
return optics_detail
def get_config(self, retrieve='all'):
rv = {
'startup': '',
'running': '',
'candidate': ''
}
options = {
'format': 'text',
'database': 'candidate'
}
if retrieve in ('candidate', 'all'):
config = self.device.rpc.get_config(filter_xml=None, options=options)
rv['candidate'] = py23_compat.text_type(config.text)
if retrieve in ('running', 'all'):
options['database'] = 'committed'
config = self.device.rpc.get_config(filter_xml=None, options=options)
rv['running'] = py23_compat.text_type(config.text)
return rv
    def get_network_instances(self, name=''):
        """Return the routing instances configured on the device.

        Interfaces not assigned to any instance are reported under a
        synthetic ``default`` instance. If *name* is given, only that
        instance is returned (or ``{}`` when it does not exist).
        """
        network_instances = {}
        ri_table = junos_views.junos_nw_instances_table(self.device)
        ri_table.get()
        ri_entries = ri_table.items()
        vrf_interfaces = []
        for ri_entry in ri_entries:
            ri_name = py23_compat.text_type(ri_entry[0])
            ri_details = {
                d[0]: d[1] for d in ri_entry[1]
            }
            ri_type = ri_details['instance_type']
            if ri_type is None:
                # an instance with no explicit type is the default one
                ri_type = 'default'
            ri_rd = ri_details['route_distinguisher']
            ri_interfaces = ri_details['interfaces']
            # a single interface comes back as a scalar, not a list
            if not isinstance(ri_interfaces, list):
                ri_interfaces = [ri_interfaces]
            network_instances[ri_name] = {
                'name': ri_name,
                'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
                'state': {
                    'route_distinguisher': ri_rd if ri_rd else ''
                },
                'interfaces': {
                    'interface': {
                        intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                    }
                }
            }
            vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
        # interfaces not claimed by any instance belong to 'default'
        all_interfaces = self.get_interfaces().keys()
        default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
        if 'default' not in network_instances:
            network_instances['default'] = {
                'name': 'default',
                'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
                'state': {
                    'route_distinguisher': ''
                },
                'interfaces': {
                    'interface': {
                        py23_compat.text_type(intrf_name): {}
                        for intrf_name in default_interfaces
                    }
                }
            }
        if not name:
            return network_instances
        if name not in network_instances:
            return {}
        return {name: network_instances[name]}
|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver.get_probes_results | python | def get_probes_results(self):
probes_results = {}
probes_results_table = junos_views.junos_rpm_probes_results_table(self.device)
probes_results_table.get()
probes_results_items = probes_results_table.items()
for probe_result in probes_results_items:
probe_name = py23_compat.text_type(probe_result[0])
test_results = {
p[0]: p[1] for p in probe_result[1]
}
test_results['last_test_loss'] = napalm_base.helpers.convert(
int, test_results.pop('last_test_loss'), 0)
for test_param_name, test_param_value in test_results.items():
if isinstance(test_param_value, float):
test_results[test_param_name] = test_param_value * 1e-3
# convert from useconds to mseconds
test_name = test_results.pop('test_name', '')
source = test_results.get('source', u'')
if source is None:
test_results['source'] = u''
if probe_name not in probes_results.keys():
probes_results[probe_name] = {}
probes_results[probe_name][test_name] = test_results
return probes_results | Return the results of the RPM probes. | train | https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L1574-L1601 | null | class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
    def __init__(self, hostname, username, password, timeout=60, optional_args=None):
        """
        Initialise the JunOS driver (connection is established in ``open``).

        Optional args:
        * config_lock (True/False): lock configuration DB after the connection is established.
        * port (int): custom port
        * key_file (string): SSH key file path
        * keepalive (int): Keepalive interval
        * ssh_config_file (string): alternative SSH configuration file path
        * ignore_warning (boolean): not generate warning exceptions
        """
        self.hostname = hostname
        self.username = username
        self.password = password
        self.timeout = timeout
        self.config_replace = False
        self.locked = False
        # Get optional arguments
        if optional_args is None:
            optional_args = {}
        self.config_lock = optional_args.get('config_lock', False)
        self.port = optional_args.get('port', 22)
        self.key_file = optional_args.get('key_file', None)
        self.keepalive = optional_args.get('keepalive', 30)
        self.ssh_config_file = optional_args.get('ssh_config_file', None)
        self.ignore_warning = optional_args.get('ignore_warning', False)
        # Build the PyEZ Device handle; key-based auth takes a different set
        # of constructor arguments than password-only auth.
        if self.key_file:
            self.device = Device(hostname,
                                 user=username,
                                 password=password,
                                 ssh_private_key_file=self.key_file,
                                 ssh_config=self.ssh_config_file,
                                 port=self.port)
        else:
            self.device = Device(hostname,
                                 user=username,
                                 password=password,
                                 port=self.port,
                                 ssh_config=self.ssh_config_file)
        self.profile = ["junos"]
def open(self):
"""Open the connection wit the device."""
try:
self.device.open()
except ConnectTimeoutError as cte:
raise ConnectionException(cte.message)
self.device.timeout = self.timeout
self.device._conn._session.transport.set_keepalive(self.keepalive)
if hasattr(self.device, "cu"):
# make sure to remove the cu attr from previous session
# ValueError: requested attribute name cu already exists
del self.device.cu
self.device.bind(cu=Config)
if self.config_lock:
self._lock()
    def close(self):
        """Close the connection.

        Releases the configuration lock (if held) before tearing down the
        NETCONF session.
        """
        if self.config_lock:
            self._unlock()
        self.device.close()
    def _lock(self):
        """Lock the config DB (idempotent: only locks once per session)."""
        if not self.locked:
            self.device.cu.lock()
            self.locked = True
    def _unlock(self):
        """Unlock the config DB (no-op when the lock is not held)."""
        if self.locked:
            self.device.cu.unlock()
            self.locked = False
    def _rpc(self, get, child=None, **kwargs):
        """
        Construct an arbitrary RPC call to retrieve common data. For example:

        Configuration: get: "<get-configuration/>"
        Interface information: get: "<get-interface-information/>"
        A particular interface's information:
            get: "<get-interface-information/>"
            child: "<interface-name>ge-0/0/0</interface-name>"

        Returns the serialized XML reply.
        NOTE(review): **kwargs is accepted but never used in this body.
        """
        rpc = etree.fromstring(get)
        if child:
            rpc.append(etree.fromstring(child))
        response = self.device.execute(rpc)
        return etree.tostring(response)
def is_alive(self):
# evaluate the state of the underlying SSH connection
# and also the NETCONF status from PyEZ
return {
'is_alive': self.device._conn._session.transport.is_active() and self.device.connected
}
@staticmethod
def _is_json_format(config):
try:
_ = json.loads(config) # noqa
except (TypeError, ValueError):
return False
return True
def _detect_config_format(self, config):
fmt = 'text'
set_action_matches = [
'set',
'activate',
'deactivate',
'annotate',
'copy',
'delete',
'insert',
'protect',
'rename',
'unprotect',
]
if config.strip().startswith('<'):
return 'xml'
elif config.strip().split(' ')[0] in set_action_matches:
return 'set'
elif self._is_json_format(config):
return 'json'
return fmt
    def _load_candidate(self, filename, config, overwrite):
        """Load a candidate configuration from *filename* or *config*.

        The payload format is auto-detected; *overwrite* selects
        replace-vs-merge semantics. Raises ReplaceConfigException or
        MergeConfigException (depending on the current mode) on load errors.
        """
        if filename is None:
            configuration = config
        else:
            with open(filename) as f:
                configuration = f.read()
        if not self.config_lock:
            # if not locked during connection time
            # will try to lock it if not already aquired
            self._lock()
            # and the device will be locked till first commit/rollback
        try:
            fmt = self._detect_config_format(configuration)
            if fmt == "xml":
                configuration = etree.XML(configuration)
            self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                                ignore_warning=self.ignore_warning)
        except ConfigLoadError as e:
            # surface the error under the exception type matching the mode
            if self.config_replace:
                raise ReplaceConfigException(e.errs)
            else:
                raise MergeConfigException(e.errs)
    def load_replace_candidate(self, filename=None, config=None):
        """Load a candidate configuration that replaces the current one."""
        self.config_replace = True
        self._load_candidate(filename, config, True)
    def load_merge_candidate(self, filename=None, config=None):
        """Load a candidate configuration to be merged with the current one."""
        self.config_replace = False
        self._load_candidate(filename, config, False)
def compare_config(self):
"""Compare candidate config with running."""
diff = self.device.cu.diff()
if diff is None:
return ''
else:
return diff.strip()
    def commit_config(self):
        """Commit the candidate configuration.

        Releases the per-operation lock unless the session holds a
        permanent one (config_lock).
        """
        self.device.cu.commit(ignore_warning=self.ignore_warning)
        if not self.config_lock:
            self._unlock()
    def discard_config(self):
        """Discard candidate changes (rollback 0).

        Releases the per-operation lock unless the session holds a
        permanent one (config_lock).
        """
        self.device.cu.rollback(rb_id=0)
        if not self.config_lock:
            self._unlock()
    def rollback(self):
        """Roll back to the previous committed configuration and commit it."""
        self.device.cu.rollback(rb_id=1)
        self.commit_config()
def get_facts(self):
"""Return facts of the device."""
output = self.device.facts
uptime = self.device.uptime or -1
interfaces = junos_views.junos_iface_table(self.device)
interfaces.get()
interface_list = interfaces.keys()
return {
'vendor': u'Juniper',
'model': py23_compat.text_type(output['model']),
'serial_number': py23_compat.text_type(output['serialnumber']),
'os_version': py23_compat.text_type(output['version']),
'hostname': py23_compat.text_type(output['hostname']),
'fqdn': py23_compat.text_type(output['fqdn']),
'uptime': uptime,
'interface_list': interface_list
}
    def get_interfaces(self):
        """Return interface details, keyed by interface name.

        Speed is normalised to Mbps (-1 when the device does not report a
        parseable value).
        """
        result = {}
        interfaces = junos_views.junos_iface_table(self.device)
        interfaces.get()
        # convert all the tuples to our pre-defined dict structure
        for iface in interfaces.keys():
            result[iface] = {
                'is_up': interfaces[iface]['is_up'],
                'is_enabled': interfaces[iface]['is_enabled'],
                'description': (interfaces[iface]['description'] or u''),
                'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
                'mac_address': napalm_base.helpers.convert(
                    napalm_base.helpers.mac,
                    interfaces[iface]['mac_address'],
                    py23_compat.text_type(interfaces[iface]['mac_address'])),
                'speed': -1
            }
            # speed is reported as e.g. '1000mbps' or '10Gbps'; extract the
            # numeric part and the unit
            match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
            if match is None:
                continue
            speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
            if speed_value == -1:
                continue
            speed_unit = match.group(2)
            if speed_unit.lower() == 'gbps':
                speed_value *= 1000
            result[iface]['speed'] = speed_value
        return result
def get_interfaces_counters(self):
"""Return interfaces counters."""
query = junos_views.junos_iface_counter_table(self.device)
query.get()
interface_counters = {}
for interface, counters in query.items():
interface_counters[interface] = {k: v if v is not None else -1 for k, v in counters}
return interface_counters
    def get_environment(self):
        """Return environment details: power, fans, temperature, CPU, memory."""
        environment = junos_views.junos_enviroment_table(self.device)
        routing_engine = junos_views.junos_routing_engine_table(self.device)
        temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
        power_supplies = junos_views.junos_pem_table(self.device)
        environment.get()
        routing_engine.get()
        temperature_thresholds.get()
        environment_data = {}
        current_class = None
        for sensor_object, object_data in environment.items():
            structured_object_data = {k: v for k, v in object_data}
            if structured_object_data['class']:
                # If current object has a 'class' defined, store it for use
                # on subsequent unlabeled lines.
                current_class = structured_object_data['class']
            else:
                # Juniper doesn't label the 2nd+ lines of a given class with a
                # class name. In that case, we use the most recent class seen.
                structured_object_data['class'] = current_class
            if structured_object_data['class'] == 'Power':
                # Create a dict for the 'power' key
                try:
                    environment_data['power'][sensor_object] = {}
                except KeyError:
                    environment_data['power'] = {}
                    environment_data['power'][sensor_object] = {}
                # placeholders; corrected from the PEM table below if possible
                environment_data['power'][sensor_object]['capacity'] = -1.0
                environment_data['power'][sensor_object]['output'] = -1.0
            if structured_object_data['class'] == 'Fans':
                # Create a dict for the 'fans' key
                try:
                    environment_data['fans'][sensor_object] = {}
                except KeyError:
                    environment_data['fans'] = {}
                    environment_data['fans'][sensor_object] = {}
            status = structured_object_data['status']
            env_class = structured_object_data['class']
            if (status == 'OK' and env_class == 'Power'):
                # If status is Failed, Absent or Testing, set status to False.
                environment_data['power'][sensor_object]['status'] = True
            elif (status != 'OK' and env_class == 'Power'):
                environment_data['power'][sensor_object]['status'] = False
            elif (status == 'OK' and env_class == 'Fans'):
                # If status is Failed, Absent or Testing, set status to False.
                environment_data['fans'][sensor_object]['status'] = True
            elif (status != 'OK' and env_class == 'Fans'):
                environment_data['fans'][sensor_object]['status'] = False
            # NOTE(review): this loop applies every threshold row to the
            # current sensor, so the last row effectively wins - looks like
            # it should match thresholds per sensor name; confirm intent.
            for temperature_object, temperature_data in temperature_thresholds.items():
                structured_temperature_data = {k: v for k, v in temperature_data}
                if structured_object_data['class'] == 'Temp':
                    # Create a dict for the 'temperature' key
                    try:
                        environment_data['temperature'][sensor_object] = {}
                    except KeyError:
                        environment_data['temperature'] = {}
                        environment_data['temperature'][sensor_object] = {}
                    # Check we have a temperature field in this class (See #66)
                    if structured_object_data['temperature']:
                        environment_data['temperature'][sensor_object]['temperature'] = \
                            float(structured_object_data['temperature'])
                    # Set a default value (False) to the key is_critical and is_alert
                    environment_data['temperature'][sensor_object]['is_alert'] = False
                    environment_data['temperature'][sensor_object]['is_critical'] = False
                    # Check if the working temperature is equal to or higher than alerting threshold
                    temp = structured_object_data['temperature']
                    if structured_temperature_data['red-alarm'] <= temp:
                        environment_data['temperature'][sensor_object]['is_critical'] = True
                        environment_data['temperature'][sensor_object]['is_alert'] = True
                    elif structured_temperature_data['yellow-alarm'] <= temp:
                        environment_data['temperature'][sensor_object]['is_alert'] = True
        # Try to correct Power Supply information
        pem_table = dict()
        try:
            power_supplies.get()
        except RpcError:
            # Not all platforms have support for this
            pass
        else:
            # Format PEM information and correct capacity and output values
            for pem in power_supplies.items():
                pem_name = pem[0].replace("PEM", "Power Supply")
                pem_table[pem_name] = dict(pem[1])
                environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
                environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
        for routing_engine_object, routing_engine_data in routing_engine.items():
            structured_routing_engine_data = {k: v for k, v in routing_engine_data}
            # Create dicts for 'cpu' and 'memory'.
            try:
                environment_data['cpu'][routing_engine_object] = {}
                environment_data['memory'] = {}
            except KeyError:
                environment_data['cpu'] = {}
                environment_data['cpu'][routing_engine_object] = {}
                environment_data['memory'] = {}
            # Calculate the CPU usage by using the CPU idle value.
            environment_data['cpu'][routing_engine_object]['%usage'] = \
                100.0 - structured_routing_engine_data['cpu-idle']
            try:
                environment_data['memory']['available_ram'] = \
                    int(structured_routing_engine_data['memory-dram-size'])
            except ValueError:
                # some platforms report e.g. '2048 MB'; keep only the digits
                environment_data['memory']['available_ram'] = \
                    int(
                        ''.join(
                            i for i in structured_routing_engine_data['memory-dram-size']
                            if i.isdigit()
                        )
                    )
            # Junos gives us RAM in %, so calculation has to be made.
            # Sadly, because of this, results are not 100% accurate to the truth.
            environment_data['memory']['used_ram'] = \
                int(round(environment_data['memory']['available_ram'] / 100.0 *
                          structured_routing_engine_data['memory-buffer-utilization']))
        return environment_data
@staticmethod
def _get_address_family(table):
"""
Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family
"""
address_family_mapping = {
'inet': 'ipv4',
'inet6': 'ipv6',
'inetflow': 'flow'
}
family = table.split('.')[-2]
try:
address_family = address_family_mapping[family]
except KeyError:
address_family = family
return address_family
    def _parse_route_stats(self, neighbor):
        """Build the per-address-family prefix statistics for a BGP neighbor.

        Returns a dict keyed by address family with received/accepted/sent
        prefix counts (-1 for families with no data). NOTE: mutates the
        *neighbor* dict ('sent_prefixes' is normalised and consumed).
        """
        data = {
            'ipv4': {
                'received_prefixes': -1,
                'accepted_prefixes': -1,
                'sent_prefixes': -1
            },
            'ipv6': {
                'received_prefixes': -1,
                'accepted_prefixes': -1,
                'sent_prefixes': -1
            }
        }
        if not neighbor['is_up']:
            return data
        elif isinstance(neighbor['tables'], list):
            if isinstance(neighbor['sent_prefixes'], int):
                # We expect sent_prefixes to be a list, but sometimes it
                # is of type int. Therefore convert attribute to list
                neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
            for idx, table in enumerate(neighbor['tables']):
                family = self._get_address_family(table)
                data[family] = {}
                data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
                data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
                # sent counters are only reported for tables that are in sync
                if 'in sync' in neighbor['send-state'][idx]:
                    data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
                else:
                    data[family]['sent_prefixes'] = 0
        else:
            # single table: the counters are scalars, not lists
            family = self._get_address_family(neighbor['tables'])
            data[family] = {}
            data[family]['received_prefixes'] = neighbor['received_prefixes']
            data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
            data[family]['sent_prefixes'] = neighbor['sent_prefixes']
        return data
@staticmethod
def _parse_value(value):
if isinstance(value, py23_compat.string_types):
return py23_compat.text_type(value)
elif value is None:
return u''
else:
return value
    def get_bgp_neighbors(self):
        """Return BGP neighbor details, grouped by routing instance.

        Instances with no peers are filtered out of the result.
        """
        bgp_neighbor_data = {}
        default_neighbor_details = {
            'local_as': 0,
            'remote_as': 0,
            'remote_id': '',
            'is_up': False,
            'is_enabled': False,
            'description': '',
            'uptime': 0,
            'address_family': {}
        }
        keys = default_neighbor_details.keys()
        uptime_table = junos_views.junos_bgp_uptime_table(self.device)
        bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
        # memoised per-instance uptime lookups to avoid repeated RPCs
        uptime_table_lookup = {}

        def _get_uptime_table(instance):
            if instance not in uptime_table_lookup:
                uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
            return uptime_table_lookup[instance]

        def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
            '''
            Make sure to execute a simple request whenever using
            junos > 13. This is a helper used to avoid code redundancy
            and reuse the function also when iterating through the list
            BGP neighbors under a specific routing instance,
            also when the device is capable to return the routing
            instance name at the BGP neighbor level.
            '''
            for bgp_neighbor in neighbor_data:
                peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
                neighbor_details = deepcopy(default_neighbor_details)
                neighbor_details.update(
                    {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
                )
                if not instance:
                    # not instance, means newer Junos version,
                    # as we request everything in a single request
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                    instance = peer_fwd_rti
                else:
                    # instance is explicitly requests,
                    # thus it's an old Junos, so we retrieve the BGP neighbors
                    # under a certain routing instance
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
                instance_name = 'global' if instance == 'master' else instance
                if instance_name not in bgp_neighbor_data:
                    bgp_neighbor_data[instance_name] = {}
                if 'router_id' not in bgp_neighbor_data[instance_name]:
                    # we only need to set this once
                    bgp_neighbor_data[instance_name]['router_id'] = \
                        py23_compat.text_type(neighbor_details.get('local_id', ''))
                peer = {
                    key: self._parse_value(value)
                    for key, value in neighbor_details.items()
                    if key in keys
                }
                peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
                peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
                peer['address_family'] = self._parse_route_stats(neighbor_details)
                if 'peers' not in bgp_neighbor_data[instance_name]:
                    bgp_neighbor_data[instance_name]['peers'] = {}
                bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
                if not uptime_table_items:
                    uptime_table_items = _get_uptime_table(instance)
                for neighbor, uptime in uptime_table_items:
                    if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                        bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                    bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]

        # Commenting out the following sections, till Junos
        # will provide a way to identify the routing instance name
        # from the details of the BGP neighbor
        # currently, there are Junos 15 version having a field called `peer_fwd_rti`
        # but unfortunately, this is not consistent.
        # Junos 17 might have this fixed, but this needs to be revisited later.
        # In the definition below, `old_junos` means a version that does not provide
        # the forwarding RTI information.
        #
        # old_junos = napalm_base.helpers.convert(
        #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
        # if old_junos:
        instances = junos_views.junos_route_instance_table(self.device).get()
        for instance, instance_data in instances.items():
            if instance.startswith('__'):
                # junos internal instances
                continue
            bgp_neighbor_data[instance] = {'peers': {}}
            instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
            uptime_table_items = uptime_table.get(instance=instance).items()
            _get_bgp_neighbors_core(instance_neighbors,
                                    instance=instance,
                                    uptime_table_items=uptime_table_items)
        # If the OS provides the `peer_fwd_rti` or any way to identify the
        # rotuing instance name (see above), the performances of this getter
        # can be significantly improved, as we won't execute one request
        # for each an every RT.
        # However, this improvement would only be beneficial for multi-VRF envs.
        #
        # else:
        #     instance_neighbors = bgp_neighbors_table.get().items()
        #     _get_bgp_neighbors_core(instance_neighbors)
        # drop instances without any peers
        bgp_tmp_dict = {}
        for k, v in bgp_neighbor_data.items():
            if bgp_neighbor_data[k]['peers']:
                bgp_tmp_dict[k] = v
        return bgp_tmp_dict
def get_lldp_neighbors(self):
"""Return LLDP neighbors details."""
lldp = junos_views.junos_lldp_table(self.device)
try:
lldp.get()
except RpcError as rpcerr:
# this assumes the library runs in an environment
# able to handle logs
# otherwise, the user just won't see this happening
log.error('Unable to retrieve the LLDP neighbors information:')
log.error(rpcerr.message)
return {}
result = lldp.items()
neighbors = {}
for neigh in result:
if neigh[0] not in neighbors.keys():
neighbors[neigh[0]] = []
neighbors[neigh[0]].append({x[0]: py23_compat.text_type(x[1]) for x in neigh[1]})
return neighbors
def get_lldp_neighbors_detail(self, interface=''):
"""Detailed view of the LLDP neighbors."""
lldp_neighbors = {}
lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
try:
lldp_table.get()
except RpcError as rpcerr:
# this assumes the library runs in an environment
# able to handle logs
# otherwise, the user just won't see this happening
log.error('Unable to retrieve the LLDP neighbors information:')
log.error(rpcerr.message)
return {}
interfaces = lldp_table.get().keys()
# get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
# and SRX Series is get-lldp-interface-neighbors-information,
# and rpc for M, MX, and T Series is get-lldp-interface-neighbors
# ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
# ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
for interface in interfaces:
if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
lldp_table.get(interface_name=interface)
else:
lldp_table.get(interface_device=interface)
for item in lldp_table:
if interface not in lldp_neighbors.keys():
lldp_neighbors[interface] = []
lldp_neighbors[interface].append({
'parent_interface': item.parent_interface,
'remote_port': item.remote_port,
'remote_chassis_id': napalm_base.helpers.convert(
napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
'remote_port_description': napalm_base.helpers.convert(
py23_compat.text_type, item.remote_port_description),
'remote_system_name': item.remote_system_name,
'remote_system_description': item.remote_system_description,
'remote_system_capab': item.remote_system_capab,
'remote_system_enable_capab': item.remote_system_enable_capab
})
return lldp_neighbors
    def cli(self, commands):
        """
        Execute raw CLI commands and return their output.

        :param commands: list of command strings; each may carry Junos-style
            pipe modifiers (e.g. ``| match foo``, ``| count``).
        :return: dict mapping each original command string to its text output.
        :raises TypeError: if ``commands`` is not a list.

        The pipe modifiers are stripped before sending the command to the
        device and then re-applied locally by the ``_*`` helpers below, so
        the result matches what the Junos CLI itself would display.
        """
        cli_output = {}
        def _count(txt, none):  # Second arg for consistency only. noqa
            '''
            Return the exact output, as Junos displays
            e.g.:
            > show system processes extensive | match root | count
            Count: 113 lines
            '''
            count = len(txt.splitlines())
            return 'Count: {count} lines'.format(count=count)
        def _trim(txt, length):
            '''
            Trim specified number of columns from start of line.
            '''
            try:
                newlines = []
                for line in txt.splitlines():
                    newlines.append(line[int(length):])
                return '\n'.join(newlines)
            except ValueError:
                return txt
        def _except(txt, pattern):
            '''
            Show only text that does not match a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            unmatched = [
                line for line in txt.splitlines()
                if not re.search(rgx, line, re.I)
            ]
            return '\n'.join(unmatched)
        def _last(txt, length):
            '''
            Display end of output only.
            '''
            try:
                return '\n'.join(
                    txt.splitlines()[(-1)*int(length):]
                )
            except ValueError:
                return txt
        def _match(txt, pattern):
            '''
            Show only text that matches a pattern.
            '''
            rgx = '^.*({pattern}).*$'.format(pattern=pattern)
            matched = [
                line for line in txt.splitlines()
                if re.search(rgx, line, re.I)
            ]
            return '\n'.join(matched)
        def _find(txt, pattern):
            '''
            Search for first occurrence of pattern.
            '''
            rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
            match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
            if match:
                return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
            else:
                return '\nPattern not found'
        def _process_pipe(cmd, txt):
            '''
            Process CLI output from Juniper device that
            doesn't allow piping the output.
            '''
            if txt is not None:
                return txt
            # Ordered: each filter is applied in this fixed sequence,
            # mimicking how the Junos CLI chains pipe operations.
            _OF_MAP = OrderedDict()
            _OF_MAP['except'] = _except
            _OF_MAP['match'] = _match
            _OF_MAP['last'] = _last
            _OF_MAP['trim'] = _trim
            _OF_MAP['count'] = _count
            _OF_MAP['find'] = _find
            # the operations order matter in this case!
            exploded_cmd = cmd.split('|')
            pipe_oper_args = {}
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0]  # always there
                pipe_args = ''.join(exploded_pipe[1:2])
                # will not throw error when there's no arg
                pipe_oper_args[pipe_oper] = pipe_args
            for oper in _OF_MAP.keys():
                # to make sure the operation sequence is correct
                if oper not in pipe_oper_args.keys():
                    continue
                txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
            return txt
        if not isinstance(commands, list):
            raise TypeError('Please enter a valid list of commands!')
        # Pipe operations never forwarded to the device (e.g. writing files).
        _PIPE_BLACKLIST = ['save']
        # Preprocessing to avoid forbidden commands
        for command in commands:
            exploded_cmd = command.split('|')
            command_safe_parts = []
            for pipe in exploded_cmd[1:]:
                exploded_pipe = pipe.split()
                pipe_oper = exploded_pipe[0]  # always there
                if pipe_oper in _PIPE_BLACKLIST:
                    continue
                pipe_args = ''.join(exploded_pipe[1:2])
                safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                                  args=pipe_args)
                command_safe_parts.append(safe_pipe)
            safe_command = exploded_cmd[0] if not command_safe_parts else\
                '{base} | {pipes}'.format(base=exploded_cmd[0],
                                          pipes=' | '.join(command_safe_parts))
            raw_txt = self.device.cli(safe_command, warning=False)
            # If the device refused the pipes, _process_pipe re-applies
            # them locally on the raw output.
            cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
                _process_pipe(command, raw_txt))
        return cli_output
    def get_bgp_config(self, group='', neighbor=''):
        """
        Return BGP configuration.

        :param group: optional BGP group name; restricts the query to that group.
        :param neighbor: optional neighbor IP; only honoured when ``group`` is
            also given (it is reset to '' otherwise, see below).
        :return: dict keyed by group name, each with group attributes,
            a nested ``prefix_limit`` dict and a ``neighbors`` dict.
        """
        def update_dict(d, u):  # for deep dictionary update
            # Recursively merges mapping u into mapping d in place.
            for k, v in u.items():
                if isinstance(d, collections.Mapping):
                    if isinstance(v, collections.Mapping):
                        r = update_dict(d.get(k, {}), v)
                        d[k] = r
                    else:
                        d[k] = u[k]
                else:
                    d = {k: u[k]}
            return d
        def build_prefix_limit(**args):
            """
            Transform the lements of a dictionary into nested dictionaries.
            Example:
                {
                    'inet_unicast_limit': 500,
                    'inet_unicast_teardown_threshold': 95,
                    'inet_unicast_teardown_timeout': 5
                }
            becomes:
                {
                    'inet': {
                        'unicast': {
                            'limit': 500,
                            'teardown': {
                                'threshold': 95,
                                'timeout': 5
                            }
                        }
                    }
                }
            """
            prefix_limit = {}
            for key, value in args.items():
                # Split the underscore-joined key into nesting levels,
                # innermost value first, then wrap outwards.
                key_levels = key.split('_')
                length = len(key_levels)-1
                temp_dict = {
                    key_levels[length]: value
                }
                for index in reversed(range(length)):
                    level = key_levels[index]
                    temp_dict = {level: temp_dict}
                update_dict(prefix_limit, temp_dict)
            return prefix_limit
        # Field -> expected datatype maps; '_prefix_limit' suffixed fields
        # are folded into the nested prefix_limit structure later.
        _COMMON_FIELDS_DATATYPE_ = {
            'description': py23_compat.text_type,
            'local_address': py23_compat.text_type,
            'local_as': int,
            'remote_as': int,
            'import_policy': py23_compat.text_type,
            'export_policy': py23_compat.text_type,
            'inet_unicast_limit_prefix_limit': int,
            'inet_unicast_teardown_threshold_prefix_limit': int,
            'inet_unicast_teardown_timeout_prefix_limit': int,
            'inet_unicast_novalidate_prefix_limit': int,
            'inet_flow_limit_prefix_limit': int,
            'inet_flow_teardown_threshold_prefix_limit': int,
            'inet_flow_teardown_timeout_prefix_limit': int,
            'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
            'inet6_unicast_limit_prefix_limit': int,
            'inet6_unicast_teardown_threshold_prefix_limit': int,
            'inet6_unicast_teardown_timeout_prefix_limit': int,
            'inet6_unicast_novalidate_prefix_limit': int,
            'inet6_flow_limit_prefix_limit': int,
            'inet6_flow_teardown_threshold_prefix_limit': int,
            'inet6_flow_teardown_timeout_prefix_limit': int,
            'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
        }
        _PEER_FIELDS_DATATYPE_MAP_ = {
            'authentication_key': py23_compat.text_type,
            'route_reflector_client': bool,
            'nhs': bool
        }
        _PEER_FIELDS_DATATYPE_MAP_.update(
            _COMMON_FIELDS_DATATYPE_
        )
        _GROUP_FIELDS_DATATYPE_MAP_ = {
            'type': py23_compat.text_type,
            'apply_groups': list,
            'remove_private_as': bool,
            'multipath': bool,
            'multihop_ttl': int
        }
        _GROUP_FIELDS_DATATYPE_MAP_.update(
            _COMMON_FIELDS_DATATYPE_
        )
        _DATATYPE_DEFAULT_ = {
            py23_compat.text_type: '',
            int: 0,
            bool: False,
            list: []
        }
        bgp_config = {}
        if group:
            bgp = junos_views.junos_bgp_config_group_table(self.device)
            bgp.get(group=group)
        else:
            bgp = junos_views.junos_bgp_config_table(self.device)
            bgp.get()
            neighbor = ''  # if no group is set, no neighbor should be set either
        bgp_items = bgp.items()
        if neighbor:
            neighbor_ip = napalm_base.helpers.ip(neighbor)
        for bgp_group in bgp_items:
            bgp_group_name = bgp_group[0]
            bgp_group_details = bgp_group[1]
            # Seed the group entry with typed defaults for all
            # non-prefix-limit fields.
            bgp_config[bgp_group_name] = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    # Policies may come as a list; flatten to a space-joined string.
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                if key == 'neighbors':
                    bgp_group_peers = value
                    continue
                bgp_config[bgp_group_name].update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            if 'multihop' in bgp_config[bgp_group_name].keys():
                # Delete 'multihop' key from the output
                del bgp_config[bgp_group_name]['multihop']
                if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                    # Set ttl to default value 64
                    bgp_config[bgp_group_name]['multihop_ttl'] = 64
            bgp_config[bgp_group_name]['neighbors'] = {}
            for bgp_group_neighbor in bgp_group_peers.items():
                bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
                if neighbor and bgp_peer_address != neighbor:
                    continue  # if filters applied, jump over all other neighbors
                bgp_group_details = bgp_group_neighbor[1]
                bgp_peer_details = {
                    field: _DATATYPE_DEFAULT_.get(datatype)
                    for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                    if '_prefix_limit' not in field
                }
                for elem in bgp_group_details:
                    if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                        continue
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    key = elem[0]
                    value = elem[1]
                    if key in ['export_policy', 'import_policy']:
                        if isinstance(value, list):
                            value = ' '.join(value)
                    if key == 'local_address':
                        value = napalm_base.helpers.convert(
                            napalm_base.helpers.ip, value, value)
                    bgp_peer_details.update({
                        key: napalm_base.helpers.convert(datatype, value, default)
                    })
                    bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                        bgp_peer_details['local_as'])
                    bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                        bgp_peer_details['remote_as'])
                    # NOTE(review): this relies on `key` holding the field of
                    # the current loop iteration; a 'cluster' field flags the
                    # peer as a route-reflector client.
                    if key == 'cluster':
                        bgp_peer_details['route_reflector_client'] = True
                        # we do not want cluster in the output
                        del bgp_peer_details['cluster']
                if 'cluster' in bgp_config[bgp_group_name].keys():
                    # Cluster configured at group level also implies RR client.
                    bgp_peer_details['route_reflector_client'] = True
                prefix_limit_fields = {}
                for elem in bgp_group_details:
                    if '_prefix_limit' in elem[0] and elem[1] is not None:
                        datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                        default = _DATATYPE_DEFAULT_.get(datatype)
                        prefix_limit_fields.update({
                            elem[0].replace('_prefix_limit', ''):
                                napalm_base.helpers.convert(datatype, elem[1], default)
                        })
                bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
                bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
                if neighbor and bgp_peer_address == neighbor_ip:
                    break  # found the desired neighbor
            if 'cluster' in bgp_config[bgp_group_name].keys():
                # we do not want cluster in the output
                del bgp_config[bgp_group_name]['cluster']
        return bgp_config
    def get_bgp_neighbors_detail(self, neighbor_address=''):
        """
        Detailed view of the BGP neighbors operational data.

        :param neighbor_address: optional neighbor IP to filter on.
        :return: dict keyed by routing-instance name ('global' for master),
            then by remote AS number, each holding a list of neighbor dicts.
        """
        bgp_neighbors = {}
        # Baseline values; fields missing from the device output keep these.
        default_neighbor_details = {
            'up': False,
            'local_as': 0,
            'remote_as': 0,
            'router_id': u'',
            'local_address': u'',
            'routing_table': u'',
            'local_address_configured': False,
            'local_port': 0,
            'remote_address': u'',
            'remote_port': 0,
            'multihop': False,
            'multipath': False,
            'remove_private_as': False,
            'import_policy': u'',
            'export_policy': u'',
            'input_messages': -1,
            'output_messages': -1,
            'input_updates': -1,
            'output_updates': -1,
            'messages_queued_out': -1,
            'connection_state': u'',
            'previous_connection_state': u'',
            'last_event': u'',
            'suppress_4byte_as': False,
            'local_as_prepend': False,
            'holdtime': 0,
            'configured_holdtime': 0,
            'keepalive': 0,
            'configured_keepalive': 0,
            'active_prefix_count': -1,
            'received_prefix_count': -1,
            'accepted_prefix_count': -1,
            'suppressed_prefix_count': -1,
            'advertised_prefix_count': -1,
            'flap_count': 0
        }
        # Maps Junos 'Options' tokens to boolean output fields.
        OPTION_KEY_MAP = {
            'RemovePrivateAS': 'remove_private_as',
            'Multipath': 'multipath',
            'Multihop': 'multihop',
            'AddressFamily': 'local_address_configured'
            # 'AuthKey' : 'authentication_key_set'
            # but other vendors do not specify if auth key is set
            # other options:
            # Preference, HoldTime, Ttl, LogUpDown, Refresh
        }
        def _bgp_iter_core(neighbor_data, instance=None):
            '''
            Iterate over a list of neighbors.
            For older junos, the routing instance is not specified inside the
            BGP neighbors XML, therefore we need to use a super sub-optimal structure
            as in get_bgp_neighbors: iterate through the list of network instances
            then execute one request for each and every routing instance.
            For newer junos, this is not necessary as the routing instance is available
            and we can get everything solve in a single request.
            '''
            for bgp_neighbor in neighbor_data:
                remote_as = int(bgp_neighbor[0])
                neighbor_details = deepcopy(default_neighbor_details)
                neighbor_details.update(
                    {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
                )
                if not instance:
                    # NOTE(review): `instance` is assigned here and therefore
                    # persists across subsequent loop iterations — presumably
                    # intentional for the single-request (newer Junos) path.
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                    instance = peer_fwd_rti
                else:
                    peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
                instance_name = 'global' if instance == 'master' else instance
                options = neighbor_details.pop('options', '')
                # NOTE(review): `str` check may miss py2 unicode values —
                # confirm whether py23_compat.string_types was intended.
                if isinstance(options, str):
                    options_list = options.split()
                    for option in options_list:
                        key = OPTION_KEY_MAP.get(option)
                        if key is not None:
                            neighbor_details[key] = True
                four_byte_as = neighbor_details.pop('4byte_as', 0)
                # local/peer addresses come as 'ip+port'; default port is 179.
                local_address = neighbor_details.pop('local_address', '')
                local_details = local_address.split('+')
                neighbor_details['local_address'] = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, local_details[0], local_details[0])
                if len(local_details) == 2:
                    neighbor_details['local_port'] = int(local_details[1])
                else:
                    neighbor_details['local_port'] = 179
                neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
                peer_address = neighbor_details.pop('peer_address', '')
                remote_details = peer_address.split('+')
                neighbor_details['remote_address'] = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, remote_details[0], remote_details[0])
                if len(remote_details) == 2:
                    neighbor_details['remote_port'] = int(remote_details[1])
                else:
                    neighbor_details['remote_port'] = 179
                neighbor_details['routing_table'] = instance_name
                neighbor_details['local_as'] = napalm_base.helpers.as_number(
                    neighbor_details['local_as'])
                neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                    neighbor_details['remote_as'])
                neighbors_rib = neighbor_details.pop('rib')
                neighbors_queue = neighbor_details.pop('queue')
                # Sum queued output messages across all queues.
                messages_queued_out = 0
                for queue_entry in neighbors_queue.items():
                    messages_queued_out += queue_entry[1][0][1]
                neighbor_details['messages_queued_out'] = messages_queued_out
                if instance_name not in bgp_neighbors.keys():
                    bgp_neighbors[instance_name] = {}
                if remote_as not in bgp_neighbors[instance_name].keys():
                    bgp_neighbors[instance_name][remote_as] = []
                neighbor_rib_stats = neighbors_rib.items()
                if not neighbor_rib_stats:
                    bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                    continue  # no RIBs available, pass default details
                # Aggregate prefix counters across all RIBs of the neighbor.
                neighbor_rib_details = {
                    'active_prefix_count': 0,
                    'received_prefix_count': 0,
                    'accepted_prefix_count': 0,
                    'suppressed_prefix_count': 0,
                    'advertised_prefix_count': 0
                }
                for rib_entry in neighbor_rib_stats:
                    for elem in rib_entry[1]:
                        if elem[1] is None:
                            neighbor_rib_details[elem[0]] += 0
                        else:
                            neighbor_rib_details[elem[0]] += elem[1]
                neighbor_details.update(neighbor_rib_details)
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)
        # old_junos = napalm_base.helpers.convert(
        #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
        bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
        # if old_junos:
        # One request per routing instance (older Junos lacks instance info
        # in the BGP neighbor XML; see _bgp_iter_core docstring).
        instances = junos_views.junos_route_instance_table(self.device)
        for instance, instance_data in instances.get().items():
            if instance.startswith('__'):
                # junos internal instances
                continue
            neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                    neighbor_address=str(neighbor_address)).items()
            _bgp_iter_core(neighbor_data, instance=instance)
        # else:
        #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
        #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
        #     _bgp_iter_core(neighbor_data)
        return bgp_neighbors
def get_arp_table(self):
"""Return the ARP table."""
# could use ArpTable
# from jnpr.junos.op.phyport import ArpTable
# and simply use it
# but
# we need:
# - filters
# - group by VLAN ID
# - hostname & TTE fields as well
arp_table = []
arp_table_raw = junos_views.junos_arp_table(self.device)
arp_table_raw.get()
arp_table_items = arp_table_raw.items()
for arp_table_entry in arp_table_items:
arp_entry = {
elem[0]: elem[1] for elem in arp_table_entry[1]
}
arp_entry['mac'] = napalm_base.helpers.mac(arp_entry.get('mac'))
arp_entry['ip'] = napalm_base.helpers.ip(arp_entry.get('ip'))
arp_table.append(arp_entry)
return arp_table
def get_ntp_peers(self):
"""Return the NTP peers configured on the device."""
ntp_table = junos_views.junos_ntp_peers_config_table(self.device)
ntp_table.get()
ntp_peers = ntp_table.items()
if not ntp_peers:
return {}
return {napalm_base.helpers.ip(peer[0]): {} for peer in ntp_peers}
def get_ntp_servers(self):
"""Return the NTP servers configured on the device."""
ntp_table = junos_views.junos_ntp_servers_config_table(self.device)
ntp_table.get()
ntp_servers = ntp_table.items()
if not ntp_servers:
return {}
return {napalm_base.helpers.ip(server[0]): {} for server in ntp_servers}
def get_ntp_stats(self):
"""Return NTP stats (associations)."""
# NTP Peers does not have XML RPC defined
# thus we need to retrieve raw text and parse...
# :(
ntp_stats = []
REGEX = (
'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
'\s+([0-9\.]+)\s?$'
)
ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
ntp_assoc_output_lines = ntp_assoc_output.splitlines()
for ntp_assoc_output_line in ntp_assoc_output_lines[3:]: # except last line
line_search = re.search(REGEX, ntp_assoc_output_line, re.I)
if not line_search:
continue # pattern not found
line_groups = line_search.groups()
try:
ntp_stats.append({
'remote': napalm_base.helpers.ip(line_groups[1]),
'synchronized': (line_groups[0] == '*'),
'referenceid': py23_compat.text_type(line_groups[2]),
'stratum': int(line_groups[3]),
'type': py23_compat.text_type(line_groups[4]),
'when': py23_compat.text_type(line_groups[5]),
'hostpoll': int(line_groups[6]),
'reachability': int(line_groups[7]),
'delay': float(line_groups[8]),
'offset': float(line_groups[9]),
'jitter': float(line_groups[10])
})
except Exception:
continue # jump to next line
return ntp_stats
def get_interfaces_ip(self):
"""Return the configured IP addresses."""
interfaces_ip = {}
interface_table = junos_views.junos_ip_interfaces_table(self.device)
interface_table.get()
interface_table_items = interface_table.items()
_FAMILY_VMAP_ = {
'inet': u'ipv4',
'inet6': u'ipv6'
# can add more mappings
}
_FAMILY_MAX_PREFIXLEN = {
'inet': 32,
'inet6': 128
}
for interface_details in interface_table_items:
ip_network = interface_details[0]
ip_address = ip_network.split('/')[0]
address = napalm_base.helpers.convert(
napalm_base.helpers.ip, ip_address, ip_address)
try:
interface_details_dict = dict(interface_details[1])
family_raw = interface_details_dict.get('family')
interface = py23_compat.text_type(interface_details_dict.get('interface'))
except ValueError:
continue
prefix = napalm_base.helpers.convert(int,
ip_network.split('/')[-1],
_FAMILY_MAX_PREFIXLEN.get(family_raw))
family = _FAMILY_VMAP_.get(family_raw)
if not family or not interface:
continue
if interface not in interfaces_ip.keys():
interfaces_ip[interface] = {}
if family not in interfaces_ip[interface].keys():
interfaces_ip[interface][family] = {}
if address not in interfaces_ip[interface][family].keys():
interfaces_ip[interface][family][address] = {}
interfaces_ip[interface][family][address][u'prefix_length'] = prefix
return interfaces_ip
def get_mac_address_table(self):
"""Return the MAC address table."""
mac_address_table = []
if self.device.facts.get('personality', '') in ['SWITCH']: # for EX & QFX devices
if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']: # for L2NG devices
mac_table = junos_views.junos_mac_address_table_switch_l2ng(self.device)
else:
mac_table = junos_views.junos_mac_address_table_switch(self.device)
else:
mac_table = junos_views.junos_mac_address_table(self.device)
mac_table.get()
mac_table_items = mac_table.items()
default_values = {
'mac': u'',
'interface': u'',
'vlan': 0,
'static': False,
'active': True,
'moves': 0,
'last_move': 0.0
}
for mac_table_entry in mac_table_items:
mac_entry = default_values.copy()
mac_entry.update(
{elem[0]: elem[1] for elem in mac_table_entry[1]}
)
mac = mac_entry.get('mac')
# JUNOS returns '*' for Type = Flood
if mac == '*':
continue
mac_entry['mac'] = napalm_base.helpers.mac(mac)
mac_address_table.append(mac_entry)
return mac_address_table
    def get_route_to(self, destination='', protocol=''):
        """
        Return route details to a specific destination, learned from a certain protocol.

        :param destination: destination prefix (string) to look up.
        :param protocol: optional protocol filter ('bgp', 'isis', 'connected', ...).
        :return: dict mapping 'prefix/len' to a list of route-entry dicts,
            each with common fields plus a 'protocol_attributes' sub-dict.
        :raises TypeError: if ``destination`` is not a string.
        :raises CommandTimeoutException: if the RPC times out (prefix too broad).
        :raises CommandErrorException: for unknown protocols or other RPC errors.
        """
        routes = {}
        if not isinstance(destination, py23_compat.string_types):
            raise TypeError('Please specify a valid destination!')
        if protocol and isinstance(destination, py23_compat.string_types):
            protocol = protocol.lower()
        if protocol == 'connected':
            protocol = 'direct'  # this is how is called on JunOS
        _COMMON_PROTOCOL_FIELDS_ = [
            'destination',
            'prefix_length',
            'protocol',
            'current_active',
            'last_active',
            'age',
            'next_hop',
            'outgoing_interface',
            'selected_next_hop',
            'preference',
            'inactive_reason',
            'routing_table'
        ]  # identifies the list of fileds common for all protocols
        _BOOLEAN_FIELDS_ = [
            'current_active',
            'selected_next_hop',
            'last_active'
        ]  # fields expected to have boolean values
        # Non-common fields kept in the output, per protocol; anything else
        # popped from the entry is discarded.
        _PROTOCOL_SPECIFIC_FIELDS_ = {
            'bgp': [
                'local_as',
                'remote_as',
                'as_path',
                'communities',
                'local_preference',
                'preference2',
                'remote_address',
                'metric',
                'metric2'
            ],
            'isis': [
                'level',
                'metric',
                'local_as'
            ]
        }
        routes_table = junos_views.junos_protocol_route_table(self.device)
        rt_kargs = {
            'destination': destination
        }
        if protocol and isinstance(destination, py23_compat.string_types):
            rt_kargs['protocol'] = protocol
        try:
            routes_table.get(**rt_kargs)
        except RpcTimeoutError:
            # on devices with milions of routes
            # in case the destination is too generic (e.g.: 10/8)
            # will take very very long to determine all routes and
            # moreover will return a huge list
            raise CommandTimeoutException(
                'Too many routes returned! Please try with a longer prefix or a specific protocol!'
            )
        except RpcError as rpce:
            if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
                raise CommandErrorException(
                    'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
            raise CommandErrorException(rpce)
        except Exception as err:
            raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))
        routes_items = routes_table.items()
        for route in routes_items:
            d = {}
            # next_hop = route[0]
            d = {elem[0]: elem[1] for elem in route[1]}
            destination = napalm_base.helpers.ip(d.pop('destination', ''))
            prefix_length = d.pop('prefix_length', 32)
            destination = '{d}/{p}'.format(
                d=destination,
                p=prefix_length
            )
            # Missing boolean fields default to False rather than None.
            d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
            as_path = d.get('as_path')
            if as_path is not None:
                d['as_path'] = as_path.split(' I ')[0]\
                    .replace('AS path:', '')\
                    .replace('I', '')\
                    .strip()
                # to be sure that contains only AS Numbers
            if d.get('inactive_reason') is None:
                d['inactive_reason'] = u''
            route_protocol = d.get('protocol').lower()
            if protocol and protocol != route_protocol:
                continue
            communities = d.get('communities')
            if communities is not None and type(communities) is not list:
                d['communities'] = [communities]
            d_keys = list(d.keys())
            # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
            all_protocol_attributes = {
                key: d.pop(key)
                for key in d_keys
                if key not in _COMMON_PROTOCOL_FIELDS_
            }
            protocol_attributes = {
                key: value for key, value in all_protocol_attributes.items()
                if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
            }
            d['protocol_attributes'] = protocol_attributes
            if destination not in routes.keys():
                routes[destination] = []
            routes[destination].append(d)
        return routes
def get_snmp_information(self):
"""Return the SNMP configuration."""
snmp_information = {}
snmp_config = junos_views.junos_snmp_config_table(self.device)
snmp_config.get()
snmp_items = snmp_config.items()
if not snmp_items:
return snmp_information
snmp_information = {
py23_compat.text_type(ele[0]): ele[1] if ele[1] else ''
for ele in snmp_items[0][1]
}
snmp_information['community'] = {}
communities_table = snmp_information.pop('communities_table')
if not communities_table:
return snmp_information
for community in communities_table.items():
community_name = py23_compat.text_type(community[0])
community_details = {
'acl': ''
}
community_details.update({
py23_compat.text_type(ele[0]): py23_compat.text_type(
ele[1] if ele[0] != 'mode'
else C.SNMP_AUTHORIZATION_MODE_MAP.get(ele[1]))
for ele in community[1]
})
snmp_information['community'][community_name] = community_details
return snmp_information
def get_probes_config(self):
"""Return the configuration of the RPM probes."""
probes = {}
probes_table = junos_views.junos_rpm_probes_config_table(self.device)
probes_table.get()
probes_table_items = probes_table.items()
for probe_test in probes_table_items:
test_name = py23_compat.text_type(probe_test[0])
test_details = {
p[0]: p[1] for p in probe_test[1]
}
probe_name = napalm_base.helpers.convert(
py23_compat.text_type, test_details.pop('probe_name'))
target = napalm_base.helpers.convert(
py23_compat.text_type, test_details.pop('target', ''))
test_interval = napalm_base.helpers.convert(int, test_details.pop('test_interval', '0'))
probe_count = napalm_base.helpers.convert(int, test_details.pop('probe_count', '0'))
probe_type = napalm_base.helpers.convert(
py23_compat.text_type, test_details.pop('probe_type', ''))
source = napalm_base.helpers.convert(
py23_compat.text_type, test_details.pop('source_address', ''))
if probe_name not in probes.keys():
probes[probe_name] = {}
probes[probe_name][test_name] = {
'probe_type': probe_type,
'target': target,
'source': source,
'probe_count': probe_count,
'test_interval': test_interval
}
return probes
    def traceroute(self,
                   destination,
                   source=C.TRACEROUTE_SOURCE,
                   ttl=C.TRACEROUTE_TTL,
                   timeout=C.TRACEROUTE_TIMEOUT,
                   vrf=C.TRACEROUTE_VRF):
        """
        Execute traceroute and return results.

        :param destination: host or IP to trace towards.
        :param source: optional source address for the probes.
        :param ttl: optional maximum TTL.
        :param timeout: optional per-probe wait time (seconds).
        :param vrf: optional routing instance to run the traceroute in.
        :return: {'success': {ttl: {'probes': {...}}}} on success, or
            {'error': '...'} when the device reports a failure.
        """
        traceroute_result = {}
        # calling form RPC does not work properly :(
        # but defined junos_route_instance_table just in case
        source_str = ''
        maxttl_str = ''
        wait_str = ''
        vrf_str = ''
        if source:
            source_str = ' source {source}'.format(source=source)
        if ttl:
            maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
        if timeout:
            wait_str = ' wait {timeout}'.format(timeout=timeout)
        if vrf:
            vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
        traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
            destination=destination,
            source=source_str,
            maxttl=maxttl_str,
            wait=wait_str,
            vrf=vrf_str
        )
        traceroute_rpc = E('command', traceroute_command)
        rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
        # make direct RPC call via NETCONF
        traceroute_results = rpc_reply.find('.//traceroute-results')
        traceroute_failure = napalm_base.helpers.find_txt(
            traceroute_results, 'traceroute-failure', '')
        error_message = napalm_base.helpers.find_txt(
            traceroute_results, 'rpc-error/error-message', '')
        if traceroute_failure and error_message:
            return {'error': '{}: {}'.format(traceroute_failure, error_message)}
        traceroute_result['success'] = {}
        for hop in traceroute_results.findall('hop'):
            ttl_value = napalm_base.helpers.convert(
                int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
            if ttl_value not in traceroute_result['success']:
                traceroute_result['success'][ttl_value] = {'probes': {}}
            for probe in hop.findall('probe-result'):
                probe_index = napalm_base.helpers.convert(
                    int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
                ip_address = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
                host_name = py23_compat.text_type(
                    napalm_base.helpers.find_txt(probe, 'host-name', '*'))
                # Device reports RTT in microseconds; convert to ms.
                rtt = napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3  # ms
                traceroute_result['success'][ttl_value]['probes'][probe_index] = {
                    'ip_address': ip_address,
                    'host_name': host_name,
                    'rtt': rtt
                }
        return traceroute_result
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
ping_dict = {}
source_str = ''
maxttl_str = ''
timeout_str = ''
size_str = ''
count_str = ''
vrf_str = ''
if source:
source_str = ' source {source}'.format(source=source)
if ttl:
maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
if timeout:
timeout_str = ' wait {timeout}'.format(timeout=timeout)
if size:
size_str = ' size {size}'.format(size=size)
if count:
count_str = ' count {count}'.format(count=count)
if vrf:
vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
destination=destination,
source=source_str,
ttl=maxttl_str,
timeout=timeout_str,
size=size_str,
count=count_str,
vrf=vrf_str
)
ping_rpc = E('command', ping_command)
rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
# make direct RPC call via NETCONF
probe_summary = rpc_reply.find('.//probe-results-summary')
if probe_summary is None:
rpc_error = rpc_reply.find('.//rpc-error')
return {'error': '{}'.format(
napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
packet_loss = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)
# rtt values are valid only if a we get an ICMP reply
if packet_loss is not 100:
ping_dict['success'] = {}
ping_dict['success']['probes_sent'] = int(
probe_summary.findtext("probes-sent"))
ping_dict['success']['packet_loss'] = packet_loss
ping_dict['success'].update({
'rtt_min': round((napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
'rtt_max': round((napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
'rtt_avg': round((napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_summary, 'rtt-average'), -1) * 1e-3), 3),
'rtt_stddev': round((napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
})
tmp = rpc_reply.find('.//ping-results')
results_array = []
for probe_result in tmp.findall('probe-result'):
ip_address = napalm_base.helpers.convert(
napalm_base.helpers.ip,
napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
rtt = round(
(napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(
probe_result, 'rtt'), -1) * 1e-3), 3)
results_array.append({'ip_address': ip_address,
'rtt': rtt})
ping_dict['success'].update({'results': results_array})
else:
return {'error': 'Packet loss {}'.format(packet_loss)}
return ping_dict
def get_users(self):
    """Return the locally configured user accounts.

    Each entry maps a username to its privilege ``level`` (Cisco-style,
    derived from the Junos login class), ``password`` hash and ``sshkeys``.
    """
    # Junos login classes translated to Cisco-like privilege levels.
    _CLASS_TO_LEVEL = {
        'super-user': 15,
        'superuser': 15,
        'operator': 5,
        'read-only': 1,
        'unauthorized': 0
    }
    _SSH_KEY_FIELDS = ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
    users_table = junos_views.junos_users_table(self.device)
    users_table.get()
    users = {}
    for username, fields in users_table.items():
        details = {'level': 0, 'password': '', 'sshkeys': []}
        # keep only the fields the device actually returned
        details.update({field: value for field, value in fields if value})
        user_class = details.pop('class', '')
        # stringify everything first; level/sshkeys are re-set below with
        # their proper types
        details = {
            field: py23_compat.text_type(value)
            for field, value in details.items()
        }
        details['level'] = _CLASS_TO_LEVEL.get(user_class, 0)
        details['sshkeys'] = [
            details.pop(field)
            for field in _SSH_KEY_FIELDS
            if details.get(field, '')
        ]
        users[username] = details
    return users
def get_optics(self):
    """Return per-interface, per-lane optical diagnostics.

    Output shape: ``{iface: {'physical_channels': {'channel': [ ... ]}}}``,
    one channel dict per lane with instant input/output power and laser
    bias current. ``avg``/``min``/``max`` are always 0.0 because the device
    only reports instantaneous values.
    """
    optics_table = junos_views.junos_intf_optics_table(self.device)
    optics_table.get()
    optics_items = optics_table.items()
    # optics_items has no lane information, so we need to re-format data
    # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
    # but the information for 40G/100G is incorrect at this point
    # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
    optics_items_with_lane = []
    for intf_optic_item in optics_items:
        temp_list = list(intf_optic_item)
        temp_list.insert(1, u"0")
        new_intf_optic_item = tuple(temp_list)
        optics_items_with_lane.append(new_intf_optic_item)
    # Now optics_items_with_lane has all optics with lane 0 included
    # Example: ('xe-0/0/0', u'0', [ optical_values ])
    # Get optical information for 40G/100G optics
    optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
    optics_table40G.get()
    optics_40Gitems = optics_table40G.items()
    # Re-format data as before inserting lane value
    new_optics_40Gitems = []
    for item in optics_40Gitems:
        lane = item[0]
        iface = item[1].pop(0)
        new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))
    # New_optics_40Gitems contains 40G/100G optics only:
    # ('et-0/0/49', u'0', [ optical_values ]),
    # ('et-0/0/49', u'1', [ optical_values ]),
    # ('et-0/0/49', u'2', [ optical_values ])
    # Remove 40G/100G optics entries with wrong information returned
    # from junos_intf_optics_table()
    iface_40G = [item[0] for item in new_optics_40Gitems]
    for intf_optic_item in optics_items_with_lane:
        iface_name = intf_optic_item[0]
        if iface_name not in iface_40G:
            new_optics_40Gitems.append(intf_optic_item)
    # New_optics_40Gitems contains all optics 10G/40G/100G with the lane
    optics_detail = {}
    for intf_optic_item in new_optics_40Gitems:
        lane = intf_optic_item[1]
        interface_name = py23_compat.text_type(intf_optic_item[0])
        optics = dict(intf_optic_item[2])
        if interface_name not in optics_detail:
            optics_detail[interface_name] = {}
            optics_detail[interface_name]['physical_channels'] = {}
            optics_detail[interface_name]['physical_channels']['channel'] = []
        # Defaulting avg, min, max values to 0.0 since device does not
        # return these values
        intf_optics = {
            'index': int(lane),
            'state': {
                'input_power': {
                    'instant': (
                        float(optics['input_power'])
                        if optics['input_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'output_power': {
                    'instant': (
                        float(optics['output_power'])
                        if optics['output_power'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'laser_bias_current': {
                    'instant': (
                        float(optics['laser_bias_current'])
                        if optics['laser_bias_current'] not in
                        [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                }
            }
        }
        optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)
    return optics_detail
def get_config(self, retrieve='all'):
    """Return the device configuration as text.

    :param retrieve: 'running', 'candidate' or 'all'.
    Junos has no startup configuration, so 'startup' is always ''.
    """
    configs = {
        'startup': '',
        'running': '',
        'candidate': ''
    }

    def _fetch(database):
        # one RPC per requested database, always in text format
        opts = {'format': 'text', 'database': database}
        reply = self.device.rpc.get_config(filter_xml=None, options=opts)
        return py23_compat.text_type(reply.text)

    if retrieve in ('candidate', 'all'):
        configs['candidate'] = _fetch('candidate')
    if retrieve in ('running', 'all'):
        configs['running'] = _fetch('committed')
    return configs
def get_network_instances(self, name=''):
    """Return routing instances (VRFs) and their member interfaces.

    :param name: when non-empty, return only that instance
                 (empty dict if it does not exist).
    Interfaces not bound to any instance are reported under 'default'.
    """
    network_instances = {}
    ri_table = junos_views.junos_nw_instances_table(self.device)
    ri_table.get()
    ri_entries = ri_table.items()
    vrf_interfaces = []
    for ri_entry in ri_entries:
        ri_name = py23_compat.text_type(ri_entry[0])
        ri_details = {
            d[0]: d[1] for d in ri_entry[1]
        }
        ri_type = ri_details['instance_type']
        if ri_type is None:
            # the master instance carries no explicit type in the reply
            ri_type = 'default'
        ri_rd = ri_details['route_distinguisher']
        ri_interfaces = ri_details['interfaces']
        if not isinstance(ri_interfaces, list):
            # a single member interface comes back as a scalar, not a list
            ri_interfaces = [ri_interfaces]
        network_instances[ri_name] = {
            'name': ri_name,
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
            'state': {
                'route_distinguisher': ri_rd if ri_rd else ''
            },
            'interfaces': {
                'interface': {
                    intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                }
            }
        }
        vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())
    # everything not claimed by a VRF belongs to the default instance
    all_interfaces = self.get_interfaces().keys()
    default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))
    if 'default' not in network_instances:
        network_instances['default'] = {
            'name': 'default',
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
            'state': {
                'route_distinguisher': ''
            },
            'interfaces': {
                'interface': {
                    py23_compat.text_type(intrf_name): {}
                    for intrf_name in default_interfaces
                }
            }
        }
    if not name:
        return network_instances
    if name not in network_instances:
        return {}
    return {name: network_instances[name]}
|
napalm-automation/napalm-junos | napalm_junos/junos.py | JunOSDriver.traceroute | python | def traceroute(self,
destination,
source=C.TRACEROUTE_SOURCE,
ttl=C.TRACEROUTE_TTL,
timeout=C.TRACEROUTE_TIMEOUT,
vrf=C.TRACEROUTE_VRF):
traceroute_result = {}
# calling form RPC does not work properly :(
# but defined junos_route_instance_table just in case
source_str = ''
maxttl_str = ''
wait_str = ''
vrf_str = ''
if source:
source_str = ' source {source}'.format(source=source)
if ttl:
maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
if timeout:
wait_str = ' wait {timeout}'.format(timeout=timeout)
if vrf:
vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format(
destination=destination,
source=source_str,
maxttl=maxttl_str,
wait=wait_str,
vrf=vrf_str
)
traceroute_rpc = E('command', traceroute_command)
rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc
# make direct RPC call via NETCONF
traceroute_results = rpc_reply.find('.//traceroute-results')
traceroute_failure = napalm_base.helpers.find_txt(
traceroute_results, 'traceroute-failure', '')
error_message = napalm_base.helpers.find_txt(
traceroute_results, 'rpc-error/error-message', '')
if traceroute_failure and error_message:
return {'error': '{}: {}'.format(traceroute_failure, error_message)}
traceroute_result['success'] = {}
for hop in traceroute_results.findall('hop'):
ttl_value = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(hop, 'ttl-value'), 1)
if ttl_value not in traceroute_result['success']:
traceroute_result['success'][ttl_value] = {'probes': {}}
for probe in hop.findall('probe-result'):
probe_index = napalm_base.helpers.convert(
int, napalm_base.helpers.find_txt(probe, 'probe-index'), 0)
ip_address = napalm_base.helpers.convert(
napalm_base.helpers.ip, napalm_base.helpers.find_txt(probe, 'ip-address'), '*')
host_name = py23_compat.text_type(
napalm_base.helpers.find_txt(probe, 'host-name', '*'))
rtt = napalm_base.helpers.convert(
float, napalm_base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3 # ms
traceroute_result['success'][ttl_value]['probes'][probe_index] = {
'ip_address': ip_address,
'host_name': host_name,
'rtt': rtt
}
return traceroute_result | Execute traceroute and return results. | train | https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L1603-L1671 | null | class JunOSDriver(NetworkDriver):
"""JunOSDriver class - inherits NetworkDriver from napalm_base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
    """
    Initialise JunOS driver.

    Optional args:
        * config_lock (True/False): lock configuration DB after the connection is established.
        * port (int): custom port
        * key_file (string): SSH key file path
        * keepalive (int): Keepalive interval, in seconds
        * ignore_warning (boolean): not generate warning exceptions
        * ssh_config_file (string): alternative OpenSSH client config file
    """
    self.hostname = hostname
    self.username = username
    self.password = password
    self.timeout = timeout
    self.config_replace = False
    self.locked = False
    # Get optional arguments
    if optional_args is None:
        optional_args = {}
    self.config_lock = optional_args.get('config_lock', False)
    self.port = optional_args.get('port', 22)
    self.key_file = optional_args.get('key_file', None)
    self.keepalive = optional_args.get('keepalive', 30)
    self.ssh_config_file = optional_args.get('ssh_config_file', None)
    self.ignore_warning = optional_args.get('ignore_warning', False)
    # Prefer key-based authentication when a private key path was supplied.
    if self.key_file:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             ssh_private_key_file=self.key_file,
                             ssh_config=self.ssh_config_file,
                             port=self.port)
    else:
        self.device = Device(hostname,
                             user=username,
                             password=password,
                             port=self.port,
                             ssh_config=self.ssh_config_file)
    self.profile = ["junos"]
def open(self):
    """Open the NETCONF connection with the device."""
    try:
        self.device.open()
    except ConnectTimeoutError as cte:
        raise ConnectionException(cte.message)
    self.device.timeout = self.timeout
    self.device._conn._session.transport.set_keepalive(self.keepalive)
    if hasattr(self.device, "cu"):
        # make sure to remove the cu attr from previous session
        # ValueError: requested attribute name cu already exists
        del self.device.cu
    # expose the PyEZ configuration utility as self.device.cu
    self.device.bind(cu=Config)
    if self.config_lock:
        self._lock()
def close(self):
    """Close the connection, releasing the config DB lock first if held."""
    if self.config_lock:
        self._unlock()
    self.device.close()
def _lock(self):
    """Acquire an exclusive lock on the candidate config DB (idempotent)."""
    if self.locked:
        return
    self.device.cu.lock()
    self.locked = True
def _unlock(self):
    """Release the candidate config DB lock (no-op when not held)."""
    if not self.locked:
        return
    self.device.cu.unlock()
    self.locked = False
def _rpc(self, get, child=None, **kwargs):
    """Execute an arbitrary RPC built from raw XML snippets.

    Examples:
        Configuration:          get="<get-configuration/>"
        Interface information:  get="<get-interface-information/>"
        One interface only:     get="<get-interface-information/>",
                                child="<interface-name>ge-0/0/0</interface-name>"

    :param get: XML string with the RPC tag to execute.
    :param child: optional XML string appended as a child element.
    :return: the RPC reply serialised back to an XML byte string.
    """
    request = etree.fromstring(get)
    if child:
        request.append(etree.fromstring(child))
    reply = self.device.execute(request)
    return etree.tostring(reply)
def is_alive(self):
    """Return {'is_alive': bool}: True only when both the SSH transport
    and the PyEZ NETCONF session are up."""
    transport_active = self.device._conn._session.transport.is_active()
    return {'is_alive': transport_active and self.device.connected}
@staticmethod
def _is_json_format(config):
    """Return True when *config* parses as JSON, False otherwise."""
    try:
        json.loads(config)
    except (TypeError, ValueError):
        return False
    return True
def _detect_config_format(self, config):
    """Best-effort detection of the configuration payload format.

    :param config: configuration blob supplied by the user.
    :return: one of 'xml', 'set', 'json' or 'text' (the default).
    """
    # first words that identify a set-style (line-by-line) configuration
    set_action_matches = [
        'set',
        'activate',
        'deactivate',
        'annotate',
        'copy',
        'delete',
        'insert',
        'protect',
        'rename',
        'unprotect',
    ]
    stripped = config.strip()
    if stripped.startswith('<'):
        return 'xml'
    # split() with no argument tolerates tabs/newlines/multiple spaces,
    # unlike the former split(' ') which missed e.g. "set\tsystem ...";
    # it also returns [] for empty input, hence the truthiness guard.
    tokens = stripped.split()
    if tokens and tokens[0] in set_action_matches:
        return 'set'
    if self._is_json_format(config):
        return 'json'
    return 'text'
def _load_candidate(self, filename, config, overwrite):
    """Load configuration from *filename* or the *config* string into the
    candidate DB.

    :param overwrite: True replaces the whole configuration, False merges.
    :raises ReplaceConfigException/MergeConfigException: on load errors,
        depending on the mode set by the public load_* wrapper.
    """
    if filename is None:
        configuration = config
    else:
        with open(filename) as f:
            configuration = f.read()
    if not self.config_lock:
        # if not locked during connection time
        # will try to lock it if not already acquired
        self._lock()
        # and the device will be locked till first commit/rollback
    try:
        fmt = self._detect_config_format(configuration)
        if fmt == "xml":
            configuration = etree.XML(configuration)
        self.device.cu.load(configuration, format=fmt, overwrite=overwrite,
                            ignore_warning=self.ignore_warning)
    except ConfigLoadError as e:
        if self.config_replace:
            raise ReplaceConfigException(e.errs)
        else:
            raise MergeConfigException(e.errs)
def load_replace_candidate(self, filename=None, config=None):
    """Load a candidate configuration that fully replaces the running one."""
    self.config_replace = True
    self._load_candidate(filename, config, True)
def load_merge_candidate(self, filename=None, config=None):
    """Load a candidate configuration to be merged into the running one."""
    self.config_replace = False
    self._load_candidate(filename, config, False)
def compare_config(self):
    """Return the candidate-vs-running diff, or '' when nothing changed."""
    diff = self.device.cu.diff()
    return '' if diff is None else diff.strip()
def commit_config(self):
    """Commit the candidate configuration.

    Releases the per-change lock afterwards, unless the session was opened
    with config_lock (connection-scoped locking).
    """
    self.device.cu.commit(ignore_warning=self.ignore_warning)
    if not self.config_lock:
        self._unlock()
def discard_config(self):
    """Discard candidate changes (rollback 0) and release the per-change lock."""
    self.device.cu.rollback(rb_id=0)
    if not self.config_lock:
        self._unlock()
def rollback(self):
    """Restore the previous configuration (rollback 1) and commit it."""
    self.device.cu.rollback(rb_id=1)
    self.commit_config()
def get_facts(self):
    """Return general device facts.

    Keys: vendor, model, serial_number, os_version, hostname, fqdn,
    uptime (-1 when unknown) and interface_list.
    """
    facts = self.device.facts
    uptime = self.device.uptime or -1
    iface_table = junos_views.junos_iface_table(self.device)
    iface_table.get()
    as_text = py23_compat.text_type
    return {
        'vendor': u'Juniper',
        'model': as_text(facts['model']),
        'serial_number': as_text(facts['serialnumber']),
        'os_version': as_text(facts['version']),
        'hostname': as_text(facts['hostname']),
        'fqdn': as_text(facts['fqdn']),
        'uptime': uptime,
        'interface_list': iface_table.keys()
    }
def get_interfaces(self):
    """Return interface details keyed by interface name.

    ``speed`` is normalised to Mbit/s (-1 when unknown); ``last_flapped``
    is a float number of seconds (-1.0 when never flapped).
    """
    result = {}
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    # convert all the tuples to our pre-defined dict structure
    for iface in interfaces.keys():
        result[iface] = {
            'is_up': interfaces[iface]['is_up'],
            'is_enabled': interfaces[iface]['is_enabled'],
            'description': (interfaces[iface]['description'] or u''),
            'last_flapped': float((interfaces[iface]['last_flapped'] or -1)),
            'mac_address': napalm_base.helpers.convert(
                napalm_base.helpers.mac,
                interfaces[iface]['mac_address'],
                py23_compat.text_type(interfaces[iface]['mac_address'])),
            'speed': -1
        }
        # result[iface]['last_flapped'] = float(result[iface]['last_flapped'])
        # Junos reports speed as e.g. '1000mbps' or '10Gbps': pull out the
        # number and the unit, scaling Gbps values to Mbps.
        match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'')
        if match is None:
            continue
        speed_value = napalm_base.helpers.convert(int, match.group(1), -1)
        if speed_value == -1:
            continue
        speed_unit = match.group(2)
        if speed_unit.lower() == 'gbps':
            speed_value *= 1000
        result[iface]['speed'] = speed_value
    return result
def get_interfaces_counters(self):
    """Return per-interface traffic/error counters (-1 for missing values)."""
    counters_table = junos_views.junos_iface_counter_table(self.device)
    counters_table.get()
    result = {}
    for iface, counters in counters_table.items():
        result[iface] = {
            name: (-1 if value is None else value)
            for name, value in counters
        }
    return result
def get_environment(self):
    """Return environment details: power, fans, temperature, cpu and memory."""
    environment = junos_views.junos_enviroment_table(self.device)
    routing_engine = junos_views.junos_routing_engine_table(self.device)
    temperature_thresholds = junos_views.junos_temperature_thresholds(self.device)
    power_supplies = junos_views.junos_pem_table(self.device)
    environment.get()
    routing_engine.get()
    temperature_thresholds.get()
    environment_data = {}
    current_class = None
    for sensor_object, object_data in environment.items():
        structured_object_data = {k: v for k, v in object_data}
        if structured_object_data['class']:
            # If current object has a 'class' defined, store it for use
            # on subsequent unlabeled lines.
            current_class = structured_object_data['class']
        else:
            # Juniper doesn't label the 2nd+ lines of a given class with a
            # class name. In that case, we use the most recent class seen.
            structured_object_data['class'] = current_class
        if structured_object_data['class'] == 'Power':
            # Create a dict for the 'power' key
            try:
                environment_data['power'][sensor_object] = {}
            except KeyError:
                environment_data['power'] = {}
                environment_data['power'][sensor_object] = {}
            # placeholders; corrected from the PEM table further below
            environment_data['power'][sensor_object]['capacity'] = -1.0
            environment_data['power'][sensor_object]['output'] = -1.0
        if structured_object_data['class'] == 'Fans':
            # Create a dict for the 'fans' key
            try:
                environment_data['fans'][sensor_object] = {}
            except KeyError:
                environment_data['fans'] = {}
                environment_data['fans'][sensor_object] = {}
        status = structured_object_data['status']
        env_class = structured_object_data['class']
        if (status == 'OK' and env_class == 'Power'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['power'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Power'):
            environment_data['power'][sensor_object]['status'] = False
        elif (status == 'OK' and env_class == 'Fans'):
            # If status is Failed, Absent or Testing, set status to False.
            environment_data['fans'][sensor_object]['status'] = True
        elif (status != 'OK' and env_class == 'Fans'):
            environment_data['fans'][sensor_object]['status'] = False
        # NOTE(review): this inner loop re-evaluates the OUTER sensor's
        # temperature against every threshold entry, overwriting
        # is_alert/is_critical each pass — the final flags reflect the last
        # threshold row only. Confirm whether thresholds should instead be
        # matched to the sensor by name.
        for temperature_object, temperature_data in temperature_thresholds.items():
            structured_temperature_data = {k: v for k, v in temperature_data}
            if structured_object_data['class'] == 'Temp':
                # Create a dict for the 'temperature' key
                try:
                    environment_data['temperature'][sensor_object] = {}
                except KeyError:
                    environment_data['temperature'] = {}
                    environment_data['temperature'][sensor_object] = {}
                # Check we have a temperature field in this class (See #66)
                if structured_object_data['temperature']:
                    environment_data['temperature'][sensor_object]['temperature'] = \
                        float(structured_object_data['temperature'])
                # Set a default value (False) to the key is_critical and is_alert
                environment_data['temperature'][sensor_object]['is_alert'] = False
                environment_data['temperature'][sensor_object]['is_critical'] = False
                # Check if the working temperature is equal to or higher than alerting threshold
                temp = structured_object_data['temperature']
                if structured_temperature_data['red-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_critical'] = True
                    environment_data['temperature'][sensor_object]['is_alert'] = True
                elif structured_temperature_data['yellow-alarm'] <= temp:
                    environment_data['temperature'][sensor_object]['is_alert'] = True
    # Try to correct Power Supply information
    pem_table = dict()
    try:
        power_supplies.get()
    except RpcError:
        # Not all platforms have support for this
        pass
    else:
        # Format PEM information and correct capacity and output values
        for pem in power_supplies.items():
            pem_name = pem[0].replace("PEM", "Power Supply")
            pem_table[pem_name] = dict(pem[1])
            environment_data['power'][pem_name]['capacity'] = pem_table[pem_name]['capacity']
            environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output']
    for routing_engine_object, routing_engine_data in routing_engine.items():
        structured_routing_engine_data = {k: v for k, v in routing_engine_data}
        # Create dicts for 'cpu' and 'memory'.
        try:
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        except KeyError:
            environment_data['cpu'] = {}
            environment_data['cpu'][routing_engine_object] = {}
            environment_data['memory'] = {}
        # Calculate the CPU usage by using the CPU idle value.
        environment_data['cpu'][routing_engine_object]['%usage'] = \
            100.0 - structured_routing_engine_data['cpu-idle']
        try:
            environment_data['memory']['available_ram'] = \
                int(structured_routing_engine_data['memory-dram-size'])
        except ValueError:
            # some platforms return e.g. '2048 MB' — keep the digits only
            environment_data['memory']['available_ram'] = \
                int(
                    ''.join(
                        i for i in structured_routing_engine_data['memory-dram-size']
                        if i.isdigit()
                    )
                )
        # Junos gives us RAM in %, so calculation has to be made.
        # Sadly, bacause of this, results are not 100% accurate to the truth.
        environment_data['memory']['used_ram'] = \
            int(round(environment_data['memory']['available_ram'] / 100.0 *
                      structured_routing_engine_data['memory-buffer-utilization']))
    return environment_data
@staticmethod
def _get_address_family(table):
    """
    Map a Junos routing table name to its address family.

    :params table: The name of the routing table (e.g. 'inet.0')
    :returns: address family ('ipv4', 'ipv6', 'flow', or the raw family)
    """
    known_families = {
        'inet': 'ipv4',
        'inet6': 'ipv6',
        'inetflow': 'flow'
    }
    # the family is the second-to-last dotted component ('inet' in 'inet.0')
    family = table.split('.')[-2]
    return known_families.get(family, family)
def _parse_route_stats(self, neighbor):
    """Build per-address-family prefix counters for a BGP neighbor.

    Returns a dict keyed by family ('ipv4', 'ipv6', ...) with
    received/accepted/sent prefix counts; the -1 placeholders are kept
    when the session is down. Note: mutates neighbor['sent_prefixes']
    (list normalisation + pop) as a side effect.
    """
    data = {
        'ipv4': {
            'received_prefixes': -1,
            'accepted_prefixes': -1,
            'sent_prefixes': -1
        },
        'ipv6': {
            'received_prefixes': -1,
            'accepted_prefixes': -1,
            'sent_prefixes': -1
        }
    }
    if not neighbor['is_up']:
        return data
    elif isinstance(neighbor['tables'], list):
        if isinstance(neighbor['sent_prefixes'], int):
            # We expect sent_prefixes to be a list, but sometimes it
            # is of type int. Therefore convert attribute to list
            neighbor['sent_prefixes'] = [neighbor['sent_prefixes']]
        for idx, table in enumerate(neighbor['tables']):
            family = self._get_address_family(table)
            data[family] = {}
            data[family]['received_prefixes'] = neighbor['received_prefixes'][idx]
            data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx]
            # sent counts are only present for tables that are 'in sync';
            # pop consumes them in table order
            if 'in sync' in neighbor['send-state'][idx]:
                data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0)
            else:
                data[family]['sent_prefixes'] = 0
    else:
        family = self._get_address_family(neighbor['tables'])
        data[family] = {}
        data[family]['received_prefixes'] = neighbor['received_prefixes']
        data[family]['accepted_prefixes'] = neighbor['accepted_prefixes']
        data[family]['sent_prefixes'] = neighbor['sent_prefixes']
    return data
@staticmethod
def _parse_value(value):
    """Normalise a table value: None becomes u'', strings become unicode,
    anything else is returned unchanged."""
    if value is None:
        return u''
    if isinstance(value, py23_compat.string_types):
        return py23_compat.text_type(value)
    return value
def get_bgp_neighbors(self):
    """Return BGP neighbors details, grouped by routing instance.

    The master instance is reported as 'global'; instances without any
    peers are filtered out of the result.
    """
    bgp_neighbor_data = {}
    default_neighbor_details = {
        'local_as': 0,
        'remote_as': 0,
        'remote_id': '',
        'is_up': False,
        'is_enabled': False,
        'description': '',
        'uptime': 0,
        'address_family': {}
    }
    keys = default_neighbor_details.keys()
    uptime_table = junos_views.junos_bgp_uptime_table(self.device)
    bgp_neighbors_table = junos_views.junos_bgp_table(self.device)
    uptime_table_lookup = {}

    def _get_uptime_table(instance):
        # cache per-instance uptime queries so each RT is fetched only once
        if instance not in uptime_table_lookup:
            uptime_table_lookup[instance] = uptime_table.get(instance=instance).items()
        return uptime_table_lookup[instance]

    def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None):
        '''
        Make sure to execute a simple request whenever using
        junos > 13. This is a helper used to avoid code redundancy
        and reuse the function also when iterating through the list
        BGP neighbors under a specific routing instance,
        also when the device is capable to return the routing
        instance name at the BGP neighbor level.
        '''
        for bgp_neighbor in neighbor_data:
            # key looks like '1.2.3.4+179'; strip the port suffix
            peer_ip = napalm_base.helpers.ip(bgp_neighbor[0].split('+')[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # not instance, means newer Junos version,
                # as we request everything in a single request
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                # instance is explicitly requests,
                # thus it's an old Junos, so we retrieve the BGP neighbors
                # under a certain routing instance
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            if instance_name not in bgp_neighbor_data:
                bgp_neighbor_data[instance_name] = {}
            if 'router_id' not in bgp_neighbor_data[instance_name]:
                # we only need to set this once
                bgp_neighbor_data[instance_name]['router_id'] = \
                    py23_compat.text_type(neighbor_details.get('local_id', ''))
            peer = {
                key: self._parse_value(value)
                for key, value in neighbor_details.items()
                if key in keys
            }
            peer['local_as'] = napalm_base.helpers.as_number(peer['local_as'])
            peer['remote_as'] = napalm_base.helpers.as_number(peer['remote_as'])
            peer['address_family'] = self._parse_route_stats(neighbor_details)
            if 'peers' not in bgp_neighbor_data[instance_name]:
                bgp_neighbor_data[instance_name]['peers'] = {}
            bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer
            if not uptime_table_items:
                uptime_table_items = _get_uptime_table(instance)
            for neighbor, uptime in uptime_table_items:
                if neighbor not in bgp_neighbor_data[instance_name]['peers']:
                    bgp_neighbor_data[instance_name]['peers'][neighbor] = {}
                bgp_neighbor_data[instance_name]['peers'][neighbor]['uptime'] = uptime[0][1]
    # Commenting out the following sections, till Junos
    # will provide a way to identify the routing instance name
    # from the details of the BGP neighbor
    # currently, there are Junos 15 version having a field called `peer_fwd_rti`
    # but unfortunately, this is not consistent.
    # Junos 17 might have this fixed, but this needs to be revisited later.
    # In the definition below, `old_junos` means a version that does not provide
    # the forwarding RTI information.
    #
    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device).get()
    for instance, instance_data in instances.items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        bgp_neighbor_data[instance] = {'peers': {}}
        instance_neighbors = bgp_neighbors_table.get(instance=instance).items()
        uptime_table_items = uptime_table.get(instance=instance).items()
        _get_bgp_neighbors_core(instance_neighbors,
                                instance=instance,
                                uptime_table_items=uptime_table_items)
    # If the OS provides the `peer_fwd_rti` or any way to identify the
    # rotuing instance name (see above), the performances of this getter
    # can be significantly improved, as we won't execute one request
    # for each an every RT.
    # However, this improvement would only be beneficial for multi-VRF envs.
    #
    # else:
    #     instance_neighbors = bgp_neighbors_table.get().items()
    #     _get_bgp_neighbors_core(instance_neighbors)
    # drop instances that ended up with no peers at all
    bgp_tmp_dict = {}
    for k, v in bgp_neighbor_data.items():
        if bgp_neighbor_data[k]['peers']:
            bgp_tmp_dict[k] = v
    return bgp_tmp_dict
def get_lldp_neighbors(self):
    """Return LLDP neighbors grouped by local interface name."""
    lldp_table = junos_views.junos_lldp_table(self.device)
    try:
        lldp_table.get()
    except RpcError as rpcerr:
        # LLDP may be disabled or unsupported on this platform; log the
        # failure (assuming the environment handles logs) and return empty
        # instead of raising.
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    neighbors = {}
    for local_iface, fields in lldp_table.items():
        entry = {name: py23_compat.text_type(value) for name, value in fields}
        neighbors.setdefault(local_iface, []).append(entry)
    return neighbors
def get_lldp_neighbors_detail(self, interface=''):
    """Detailed view of the LLDP neighbors.

    NOTE(review): the ``interface`` argument is shadowed by the loop
    variable below, so details are always returned for every interface —
    confirm whether per-interface filtering was intended.
    """
    lldp_neighbors = {}
    lldp_table = junos_views.junos_lldp_neighbors_detail_table(self.device)
    try:
        lldp_table.get()
    except RpcError as rpcerr:
        # this assumes the library runs in an environment
        # able to handle logs
        # otherwise, the user just won't see this happening
        log.error('Unable to retrieve the LLDP neighbors information:')
        log.error(rpcerr.message)
        return {}
    interfaces = lldp_table.get().keys()
    # get lldp neighbor by interface rpc for EX Series, QFX Series, J Series
    # and SRX Series is get-lldp-interface-neighbors-information,
    # and rpc for M, MX, and T Series is get-lldp-interface-neighbors
    # ref1: https://apps.juniper.net/xmlapi/operTags.jsp (Junos 13.1 and later)
    # ref2: https://www.juniper.net/documentation/en_US/junos12.3/information-products/topic-collections/junos-xml-ref-oper/index.html (Junos 12.3) # noqa
    lldp_table.GET_RPC = 'get-lldp-interface-neighbors'
    if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
        lldp_table.GET_RPC = 'get-lldp-interface-neighbors-information'
    for interface in interfaces:
        if self.device.facts.get('personality') not in ('MX', 'M', 'T'):
            lldp_table.get(interface_name=interface)
        else:
            lldp_table.get(interface_device=interface)
        for item in lldp_table:
            if interface not in lldp_neighbors.keys():
                lldp_neighbors[interface] = []
            lldp_neighbors[interface].append({
                'parent_interface': item.parent_interface,
                'remote_port': item.remote_port,
                'remote_chassis_id': napalm_base.helpers.convert(
                    napalm_base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id),
                'remote_port_description': napalm_base.helpers.convert(
                    py23_compat.text_type, item.remote_port_description),
                'remote_system_name': item.remote_system_name,
                'remote_system_description': item.remote_system_description,
                'remote_system_capab': item.remote_system_capab,
                'remote_system_enable_capab': item.remote_system_enable_capab
            })
    return lldp_neighbors
def cli(self, commands):
    """Execute raw CLI commands and return their output keyed by command.

    Pipe modifiers (| match, | count, ...) are stripped of blacklisted
    operations before being sent, and can be emulated locally by the
    helpers below for devices that do not support piping.
    """
    cli_output = {}

    def _count(txt, none):  # Second arg for consistency only. noqa
        '''
        Return the exact output, as Junos displays
        e.g.:
        > show system processes extensive | match root | count
        Count: 113 lines
        '''
        count = len(txt.splitlines())
        return 'Count: {count} lines'.format(count=count)

    def _trim(txt, length):
        '''
        Trim specified number of columns from start of line.
        '''
        try:
            newlines = []
            for line in txt.splitlines():
                newlines.append(line[int(length):])
            return '\n'.join(newlines)
        except ValueError:
            return txt

    def _except(txt, pattern):
        '''
        Show only text that does not match a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        unmatched = [
            line for line in txt.splitlines()
            if not re.search(rgx, line, re.I)
        ]
        return '\n'.join(unmatched)

    def _last(txt, length):
        '''
        Display end of output only.
        '''
        try:
            return '\n'.join(
                txt.splitlines()[(-1)*int(length):]
            )
        except ValueError:
            return txt

    def _match(txt, pattern):
        '''
        Show only text that matches a pattern.
        '''
        rgx = '^.*({pattern}).*$'.format(pattern=pattern)
        matched = [
            line for line in txt.splitlines()
            if re.search(rgx, line, re.I)
        ]
        return '\n'.join(matched)

    def _find(txt, pattern):
        '''
        Search for first occurrence of pattern.
        '''
        rgx = '^.*({pattern})(.*)$'.format(pattern=pattern)
        match = re.search(rgx, txt, re.I | re.M | re.DOTALL)
        if match:
            return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2))
        else:
            return '\nPattern not found'

    def _process_pipe(cmd, txt):
        '''
        Process CLI output from Juniper device that
        doesn't allow piping the output.
        '''
        # NOTE(review): this early return means the Python-side pipe
        # emulation below only runs when the device returned None —
        # confirm the condition is not inverted.
        if txt is not None:
            return txt
        _OF_MAP = OrderedDict()
        _OF_MAP['except'] = _except
        _OF_MAP['match'] = _match
        _OF_MAP['last'] = _last
        _OF_MAP['trim'] = _trim
        _OF_MAP['count'] = _count
        _OF_MAP['find'] = _find
        # the operations order matter in this case!
        exploded_cmd = cmd.split('|')
        pipe_oper_args = {}
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            pipe_args = ''.join(exploded_pipe[1:2])
            # will not throw error when there's no arg
            pipe_oper_args[pipe_oper] = pipe_args
        for oper in _OF_MAP.keys():
            # to make sure the operation sequence is correct
            if oper not in pipe_oper_args.keys():
                continue
            txt = _OF_MAP[oper](txt, pipe_oper_args[oper])
        return txt

    if not isinstance(commands, list):
        raise TypeError('Please enter a valid list of commands!')
    _PIPE_BLACKLIST = ['save']
    # Preprocessing to avoid forbidden commands
    for command in commands:
        exploded_cmd = command.split('|')
        command_safe_parts = []
        for pipe in exploded_cmd[1:]:
            exploded_pipe = pipe.split()
            pipe_oper = exploded_pipe[0]  # always there
            if pipe_oper in _PIPE_BLACKLIST:
                continue
            pipe_args = ''.join(exploded_pipe[1:2])
            safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper,
                                                                              args=pipe_args)
            command_safe_parts.append(safe_pipe)
        safe_command = exploded_cmd[0] if not command_safe_parts else\
            '{base} | {pipes}'.format(base=exploded_cmd[0],
                                      pipes=' | '.join(command_safe_parts))
        raw_txt = self.device.cli(safe_command, warning=False)
        cli_output[py23_compat.text_type(command)] = py23_compat.text_type(
            _process_pipe(command, raw_txt))
    return cli_output
def get_bgp_config(self, group='', neighbor=''):
    """Return BGP configuration.

    :param group: restrict the output to this BGP group only (optional).
    :param neighbor: restrict the output to this neighbor; honoured only
        when ``group`` is also specified (see below).
    :return: dict keyed by BGP group name; each value carries the group
        options, a nested ``prefix_limit`` tree and a ``neighbors`` dict
        keyed by peer IP address.
    """
    def update_dict(d, u):  # for deep dictionary update
        # Recursively merge mapping `u` into `d`: nested mappings are
        # merged, scalar leaves are overwritten.
        for k, v in u.items():
            if isinstance(d, collections.Mapping):
                if isinstance(v, collections.Mapping):
                    r = update_dict(d.get(k, {}), v)
                    d[k] = r
                else:
                    d[k] = u[k]
            else:
                d = {k: u[k]}
        return d

    def build_prefix_limit(**args):
        """
        Transform the elements of a dictionary into nested dictionaries.

        Example:
            {
                'inet_unicast_limit': 500,
                'inet_unicast_teardown_threshold': 95,
                'inet_unicast_teardown_timeout': 5
            }

        becomes:
            {
                'inet': {
                    'unicast': {
                        'limit': 500,
                        'teardown': {
                            'threshold': 95,
                            'timeout': 5
                        }
                    }
                }
            }
        """
        prefix_limit = {}
        for key, value in args.items():
            key_levels = key.split('_')
            length = len(key_levels)-1
            temp_dict = {
                key_levels[length]: value
            }
            # wrap the leaf value one level per underscore-separated token,
            # innermost-first
            for index in reversed(range(length)):
                level = key_levels[index]
                temp_dict = {level: temp_dict}
            update_dict(prefix_limit, temp_dict)
        return prefix_limit

    # expected datatype of each field shared by groups and neighbors;
    # used both to cast values and to derive per-type defaults
    _COMMON_FIELDS_DATATYPE_ = {
        'description': py23_compat.text_type,
        'local_address': py23_compat.text_type,
        'local_as': int,
        'remote_as': int,
        'import_policy': py23_compat.text_type,
        'export_policy': py23_compat.text_type,
        'inet_unicast_limit_prefix_limit': int,
        'inet_unicast_teardown_threshold_prefix_limit': int,
        'inet_unicast_teardown_timeout_prefix_limit': int,
        'inet_unicast_novalidate_prefix_limit': int,
        'inet_flow_limit_prefix_limit': int,
        'inet_flow_teardown_threshold_prefix_limit': int,
        'inet_flow_teardown_timeout_prefix_limit': int,
        'inet_flow_novalidate_prefix_limit': py23_compat.text_type,
        'inet6_unicast_limit_prefix_limit': int,
        'inet6_unicast_teardown_threshold_prefix_limit': int,
        'inet6_unicast_teardown_timeout_prefix_limit': int,
        'inet6_unicast_novalidate_prefix_limit': int,
        'inet6_flow_limit_prefix_limit': int,
        'inet6_flow_teardown_threshold_prefix_limit': int,
        'inet6_flow_teardown_timeout_prefix_limit': int,
        'inet6_flow_novalidate_prefix_limit': py23_compat.text_type,
    }

    # neighbor-only fields
    _PEER_FIELDS_DATATYPE_MAP_ = {
        'authentication_key': py23_compat.text_type,
        'route_reflector_client': bool,
        'nhs': bool
    }
    _PEER_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )

    # group-only fields
    _GROUP_FIELDS_DATATYPE_MAP_ = {
        'type': py23_compat.text_type,
        'apply_groups': list,
        'remove_private_as': bool,
        'multipath': bool,
        'multihop_ttl': int
    }
    _GROUP_FIELDS_DATATYPE_MAP_.update(
        _COMMON_FIELDS_DATATYPE_
    )

    # default value used when a field of a given datatype is absent
    _DATATYPE_DEFAULT_ = {
        py23_compat.text_type: '',
        int: 0,
        bool: False,
        list: []
    }

    bgp_config = {}

    if group:
        bgp = junos_views.junos_bgp_config_group_table(self.device)
        bgp.get(group=group)
    else:
        bgp = junos_views.junos_bgp_config_table(self.device)
        bgp.get()
        neighbor = ''  # if no group is set, no neighbor should be set either
    bgp_items = bgp.items()

    if neighbor:
        neighbor_ip = napalm_base.helpers.ip(neighbor)

    for bgp_group in bgp_items:
        bgp_group_name = bgp_group[0]
        bgp_group_details = bgp_group[1]
        # seed the group with per-datatype defaults for all
        # non-prefix-limit fields
        bgp_config[bgp_group_name] = {
            field: _DATATYPE_DEFAULT_.get(datatype)
            for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items()
            if '_prefix_limit' not in field
        }
        for elem in bgp_group_details:
            # only non-null, non-prefix-limit fields in this pass
            if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                continue
            datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
            default = _DATATYPE_DEFAULT_.get(datatype)
            key = elem[0]
            value = elem[1]
            if key in ['export_policy', 'import_policy']:
                # policy chains come back as lists; flatten to one string
                if isinstance(value, list):
                    value = ' '.join(value)
            if key == 'local_address':
                value = napalm_base.helpers.convert(
                    napalm_base.helpers.ip, value, value)
            if key == 'neighbors':
                # remember the nested neighbors table; processed below
                bgp_group_peers = value
                continue
            bgp_config[bgp_group_name].update({
                key: napalm_base.helpers.convert(datatype, value, default)
            })
        # second pass: collect the prefix-limit fields into a nested tree
        prefix_limit_fields = {}
        for elem in bgp_group_details:
            if '_prefix_limit' in elem[0] and elem[1] is not None:
                datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                prefix_limit_fields.update({
                    elem[0].replace('_prefix_limit', ''):
                        napalm_base.helpers.convert(datatype, elem[1], default)
                })
        bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
        if 'multihop' in bgp_config[bgp_group_name].keys():
            # Delete 'multihop' key from the output
            del bgp_config[bgp_group_name]['multihop']
            if bgp_config[bgp_group_name]['multihop_ttl'] == 0:
                # Set ttl to default value 64
                bgp_config[bgp_group_name]['multihop_ttl'] = 64

        bgp_config[bgp_group_name]['neighbors'] = {}
        for bgp_group_neighbor in bgp_group_peers.items():
            bgp_peer_address = napalm_base.helpers.ip(bgp_group_neighbor[0])
            if neighbor and bgp_peer_address != neighbor:
                continue  # if filters applied, jump over all other neighbors
            bgp_group_details = bgp_group_neighbor[1]
            # seed the peer with per-datatype defaults
            bgp_peer_details = {
                field: _DATATYPE_DEFAULT_.get(datatype)
                for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items()
                if '_prefix_limit' not in field
            }
            for elem in bgp_group_details:
                if not('_prefix_limit' not in elem[0] and elem[1] is not None):
                    continue
                datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                default = _DATATYPE_DEFAULT_.get(datatype)
                key = elem[0]
                value = elem[1]
                if key in ['export_policy', 'import_policy']:
                    if isinstance(value, list):
                        value = ' '.join(value)
                if key == 'local_address':
                    value = napalm_base.helpers.convert(
                        napalm_base.helpers.ip, value, value)
                bgp_peer_details.update({
                    key: napalm_base.helpers.convert(datatype, value, default)
                })
                # NOTE(review): the AS normalisation and the 'cluster'
                # check below run once per field (they use the loop's
                # current `key`), not once per peer — kept as-is.
                bgp_peer_details['local_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['local_as'])
                bgp_peer_details['remote_as'] = napalm_base.helpers.as_number(
                    bgp_peer_details['remote_as'])
                if key == 'cluster':
                    bgp_peer_details['route_reflector_client'] = True
                    # we do not want cluster in the output
                    del bgp_peer_details['cluster']
            # a cluster ID on the group makes every peer an RR client
            if 'cluster' in bgp_config[bgp_group_name].keys():
                bgp_peer_details['route_reflector_client'] = True
            prefix_limit_fields = {}
            for elem in bgp_group_details:
                if '_prefix_limit' in elem[0] and elem[1] is not None:
                    datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0])
                    default = _DATATYPE_DEFAULT_.get(datatype)
                    prefix_limit_fields.update({
                        elem[0].replace('_prefix_limit', ''):
                            napalm_base.helpers.convert(datatype, elem[1], default)
                    })
            bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields)
            bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details
            if neighbor and bgp_peer_address == neighbor_ip:
                break  # found the desired neighbor
        if 'cluster' in bgp_config[bgp_group_name].keys():
            # we do not want cluster in the output
            del bgp_config[bgp_group_name]['cluster']
    return bgp_config
def get_bgp_neighbors_detail(self, neighbor_address=''):
    """Detailed view of the BGP neighbors operational data.

    :param neighbor_address: restrict the output to this peer (optional).
    :return: dict keyed by routing-instance name ('global' for master),
        then by remote AS number, each holding a list of neighbor dicts.
    """
    bgp_neighbors = {}
    # every field a neighbor dict may carry, with its "unknown" default
    default_neighbor_details = {
        'up': False,
        'local_as': 0,
        'remote_as': 0,
        'router_id': u'',
        'local_address': u'',
        'routing_table': u'',
        'local_address_configured': False,
        'local_port': 0,
        'remote_address': u'',
        'remote_port': 0,
        'multihop': False,
        'multipath': False,
        'remove_private_as': False,
        'import_policy': u'',
        'export_policy': u'',
        'input_messages': -1,
        'output_messages': -1,
        'input_updates': -1,
        'output_updates': -1,
        'messages_queued_out': -1,
        'connection_state': u'',
        'previous_connection_state': u'',
        'last_event': u'',
        'suppress_4byte_as': False,
        'local_as_prepend': False,
        'holdtime': 0,
        'configured_holdtime': 0,
        'keepalive': 0,
        'configured_keepalive': 0,
        'active_prefix_count': -1,
        'received_prefix_count': -1,
        'accepted_prefix_count': -1,
        'suppressed_prefix_count': -1,
        'advertised_prefix_count': -1,
        'flap_count': 0
    }
    # maps JunOS "bgp-option" tokens to boolean output fields
    OPTION_KEY_MAP = {
        'RemovePrivateAS': 'remove_private_as',
        'Multipath': 'multipath',
        'Multihop': 'multihop',
        'AddressFamily': 'local_address_configured'
        # 'AuthKey' : 'authentication_key_set'
        # but other vendors do not specify if auth key is set
        # other options:
        # Preference, HoldTime, Ttl, LogUpDown, Refresh
    }

    def _bgp_iter_core(neighbor_data, instance=None):
        '''
        Iterate over a list of neighbors.

        For older junos, the routing instance is not specified inside the
        BGP neighbors XML, therefore we need to use a super sub-optimal structure
        as in get_bgp_neighbors: iterate through the list of network instances
        then execute one request for each and every routing instance.
        For newer junos, this is not necessary as the routing instance is available
        and we can get everything solve in a single request.

        Mutates the enclosing ``bgp_neighbors`` dict in place.
        '''
        for bgp_neighbor in neighbor_data:
            remote_as = int(bgp_neighbor[0])
            neighbor_details = deepcopy(default_neighbor_details)
            neighbor_details.update(
                {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None}
            )
            if not instance:
                # fall back to the instance reported by the neighbor itself
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti')
                instance = peer_fwd_rti
            else:
                peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '')
            instance_name = 'global' if instance == 'master' else instance
            options = neighbor_details.pop('options', '')
            if isinstance(options, str):
                # options is a space-separated token list; raise the
                # corresponding boolean flags
                options_list = options.split()
                for option in options_list:
                    key = OPTION_KEY_MAP.get(option)
                    if key is not None:
                        neighbor_details[key] = True
            four_byte_as = neighbor_details.pop('4byte_as', 0)
            # addresses come back as "ip+port"; split and default port 179
            local_address = neighbor_details.pop('local_address', '')
            local_details = local_address.split('+')
            neighbor_details['local_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, local_details[0], local_details[0])
            if len(local_details) == 2:
                neighbor_details['local_port'] = int(local_details[1])
            else:
                neighbor_details['local_port'] = 179
            neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as)
            peer_address = neighbor_details.pop('peer_address', '')
            remote_details = peer_address.split('+')
            neighbor_details['remote_address'] = napalm_base.helpers.convert(
                napalm_base.helpers.ip, remote_details[0], remote_details[0])
            if len(remote_details) == 2:
                neighbor_details['remote_port'] = int(remote_details[1])
            else:
                neighbor_details['remote_port'] = 179
            neighbor_details['routing_table'] = instance_name
            neighbor_details['local_as'] = napalm_base.helpers.as_number(
                neighbor_details['local_as'])
            neighbor_details['remote_as'] = napalm_base.helpers.as_number(
                neighbor_details['remote_as'])
            neighbors_rib = neighbor_details.pop('rib')
            neighbors_queue = neighbor_details.pop('queue')
            # sum the queued-out counters across all queues
            messages_queued_out = 0
            for queue_entry in neighbors_queue.items():
                messages_queued_out += queue_entry[1][0][1]
            neighbor_details['messages_queued_out'] = messages_queued_out
            if instance_name not in bgp_neighbors.keys():
                bgp_neighbors[instance_name] = {}
            if remote_as not in bgp_neighbors[instance_name].keys():
                bgp_neighbors[instance_name][remote_as] = []
            neighbor_rib_stats = neighbors_rib.items()
            if not neighbor_rib_stats:
                bgp_neighbors[instance_name][remote_as].append(neighbor_details)
                continue  # no RIBs available, pass default details
            # aggregate prefix counters across all RIBs of this peer
            neighbor_rib_details = {
                'active_prefix_count': 0,
                'received_prefix_count': 0,
                'accepted_prefix_count': 0,
                'suppressed_prefix_count': 0,
                'advertised_prefix_count': 0
            }
            for rib_entry in neighbor_rib_stats:
                for elem in rib_entry[1]:
                    if elem[1] is None:
                        neighbor_rib_details[elem[0]] += 0
                    else:
                        neighbor_rib_details[elem[0]] += elem[1]
            neighbor_details.update(neighbor_rib_details)
            bgp_neighbors[instance_name][remote_as].append(neighbor_details)

    # old_junos = napalm_base.helpers.convert(
    #     int, self.device.facts.get('version', '0.0').split('.')[0], 0) < 15
    bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)

    # if old_junos:
    instances = junos_views.junos_route_instance_table(self.device)
    for instance, instance_data in instances.get().items():
        if instance.startswith('__'):
            # junos internal instances
            continue
        neighbor_data = bgp_neighbors_table.get(instance=instance,
                                                neighbor_address=str(neighbor_address)).items()
        _bgp_iter_core(neighbor_data, instance=instance)
    # else:
    #     bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device)
    #     neighbor_data = bgp_neighbors_table.get(neighbor_address=neighbor_address).items()
    #     _bgp_iter_core(neighbor_data)
    return bgp_neighbors
def get_arp_table(self):
    """Return the ARP table as a list of entry dicts.

    Each entry carries the fields returned by the ARP view, with the
    'mac' and 'ip' values normalised through the napalm helpers.
    """
    # We roll our own view (instead of jnpr.junos.op.phyport.ArpTable)
    # because we also need the VLAN ID, hostname and TTE fields.
    entries = []
    table = junos_views.junos_arp_table(self.device)
    table.get()
    for _, fields in table.items():
        entry = dict(fields)
        entry['mac'] = napalm_base.helpers.mac(entry.get('mac'))
        entry['ip'] = napalm_base.helpers.ip(entry.get('ip'))
        entries.append(entry)
    return entries
def get_ntp_peers(self):
    """Return the NTP peers configured on the device.

    The result maps each normalised peer address to an (empty) details
    dict; an empty dict is returned when nothing is configured.
    """
    peers_table = junos_views.junos_ntp_peers_config_table(self.device)
    peers_table.get()
    configured = peers_table.items()
    if not configured:
        return {}
    return {napalm_base.helpers.ip(address): {} for address, _ in configured}
def get_ntp_servers(self):
    """Return the NTP servers configured on the device.

    The result maps each normalised server address to an (empty) details
    dict; an empty dict is returned when nothing is configured.
    """
    servers_table = junos_views.junos_ntp_servers_config_table(self.device)
    servers_table.get()
    configured = servers_table.items()
    if not configured:
        return {}
    return {napalm_base.helpers.ip(address): {} for address, _ in configured}
def get_ntp_stats(self):
    """Return NTP stats (associations).

    There is no XML RPC for ``show ntp associations``, so the raw CLI
    output is parsed line by line with a regular expression.

    :return: list of association dicts (remote, synchronized, referenceid,
        stratum, type, when, hostpoll, reachability, delay, offset, jitter).
    """
    ntp_stats = []
    # Raw strings: the original pattern used plain strings, whose '\s'
    # escapes are invalid escape sequences in modern Python.  Compile
    # once instead of re-matching the pattern object per line.
    REGEX = re.compile(
        r'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)'
        r'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})'
        r'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)'
        r'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)'
        r'\s+([0-9\.]+)\s?$',
        re.I
    )
    ntp_assoc_output = self.device.cli('show ntp associations no-resolve')
    ntp_assoc_output_lines = ntp_assoc_output.splitlines()
    for ntp_assoc_output_line in ntp_assoc_output_lines[3:]:  # skip the 3 header lines
        line_search = REGEX.search(ntp_assoc_output_line)
        if not line_search:
            continue  # pattern not found
        line_groups = line_search.groups()
        try:
            ntp_stats.append({
                'remote': napalm_base.helpers.ip(line_groups[1]),
                'synchronized': (line_groups[0] == '*'),
                'referenceid': py23_compat.text_type(line_groups[2]),
                'stratum': int(line_groups[3]),
                'type': py23_compat.text_type(line_groups[4]),
                'when': py23_compat.text_type(line_groups[5]),
                'hostpoll': int(line_groups[6]),
                'reachability': int(line_groups[7]),
                'delay': float(line_groups[8]),
                'offset': float(line_groups[9]),
                'jitter': float(line_groups[10])
            })
        except Exception:
            continue  # jump to next line
    return ntp_stats
def get_interfaces_ip(self):
    """Return the configured IP addresses.

    :return: nested dict ``{interface: {'ipv4'|'ipv6': {address:
        {'prefix_length': int}}}}``.
    """
    interfaces_ip = {}

    interface_table = junos_views.junos_ip_interfaces_table(self.device)
    interface_table.get()
    interface_table_items = interface_table.items()

    # map JunOS address families to the napalm output keys
    _FAMILY_VMAP_ = {
        'inet': u'ipv4',
        'inet6': u'ipv6'
        # can add more mappings
    }
    # fallback prefix length when none is present in the key
    _FAMILY_MAX_PREFIXLEN = {
        'inet': 32,
        'inet6': 128
    }

    for interface_details in interface_table_items:
        # table key is "address/prefix"
        ip_network = interface_details[0]
        ip_address = ip_network.split('/')[0]
        address = napalm_base.helpers.convert(
            napalm_base.helpers.ip, ip_address, ip_address)
        try:
            interface_details_dict = dict(interface_details[1])
            family_raw = interface_details_dict.get('family')
            interface = py23_compat.text_type(interface_details_dict.get('interface'))
        except ValueError:
            continue
        prefix = napalm_base.helpers.convert(int,
                                             ip_network.split('/')[-1],
                                             _FAMILY_MAX_PREFIXLEN.get(family_raw))
        family = _FAMILY_VMAP_.get(family_raw)
        if not family or not interface:
            # unsupported family or missing interface name
            continue
        if interface not in interfaces_ip.keys():
            interfaces_ip[interface] = {}
        if family not in interfaces_ip[interface].keys():
            interfaces_ip[interface][family] = {}
        if address not in interfaces_ip[interface][family].keys():
            interfaces_ip[interface][family][address] = {}
        interfaces_ip[interface][family][address][u'prefix_length'] = prefix

    return interfaces_ip
def get_mac_address_table(self):
    """Return the MAC address table as a list of entry dicts.

    Flood entries (MAC reported as '*') are skipped; missing fields fall
    back to sensible defaults.
    """
    # EX/QFX switches expose the table through a dedicated view;
    # L2NG-style switches need yet another one.
    if self.device.facts.get('personality', '') in ['SWITCH']:
        if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']:
            table_view = junos_views.junos_mac_address_table_switch_l2ng(self.device)
        else:
            table_view = junos_views.junos_mac_address_table_switch(self.device)
    else:
        table_view = junos_views.junos_mac_address_table(self.device)
    table_view.get()

    defaults = {
        'mac': u'',
        'interface': u'',
        'vlan': 0,
        'static': False,
        'active': True,
        'moves': 0,
        'last_move': 0.0
    }
    mac_address_table = []
    for _, fields in table_view.items():
        entry = defaults.copy()
        entry.update(dict(fields))
        # JUNOS returns '*' for Type = Flood
        if entry.get('mac') == '*':
            continue
        entry['mac'] = napalm_base.helpers.mac(entry.get('mac'))
        mac_address_table.append(entry)
    return mac_address_table
def get_route_to(self, destination='', protocol=''):
    """Return route details to a specific destination, learned from a certain protocol.

    :param destination: prefix or host to look up (required, string).
    :param protocol: restrict to routes learned via this protocol
        (optional; 'connected' is translated to JunOS 'direct').
    :raises TypeError: when ``destination`` is not a string.
    :raises CommandTimeoutException: when the RPC times out (destination
        too generic on a device with a very large RIB).
    :raises CommandErrorException: on any other RPC failure.
    :return: dict ``{'<destination>/<prefix_length>': [route_dict, ...]}``.
    """
    routes = {}

    if not isinstance(destination, py23_compat.string_types):
        raise TypeError('Please specify a valid destination!')

    if protocol and isinstance(destination, py23_compat.string_types):
        protocol = protocol.lower()

    if protocol == 'connected':
        protocol = 'direct'  # this is how is called on JunOS

    _COMMON_PROTOCOL_FIELDS_ = [
        'destination',
        'prefix_length',
        'protocol',
        'current_active',
        'last_active',
        'age',
        'next_hop',
        'outgoing_interface',
        'selected_next_hop',
        'preference',
        'inactive_reason',
        'routing_table'
    ]  # identifies the list of fields common for all protocols

    _BOOLEAN_FIELDS_ = [
        'current_active',
        'selected_next_hop',
        'last_active'
    ]  # fields expected to have boolean values

    # per-protocol fields that end up under 'protocol_attributes'
    _PROTOCOL_SPECIFIC_FIELDS_ = {
        'bgp': [
            'local_as',
            'remote_as',
            'as_path',
            'communities',
            'local_preference',
            'preference2',
            'remote_address',
            'metric',
            'metric2'
        ],
        'isis': [
            'level',
            'metric',
            'local_as'
        ]
    }

    routes_table = junos_views.junos_protocol_route_table(self.device)

    rt_kargs = {
        'destination': destination
    }
    if protocol and isinstance(destination, py23_compat.string_types):
        rt_kargs['protocol'] = protocol

    try:
        routes_table.get(**rt_kargs)
    except RpcTimeoutError:
        # on devices with milions of routes
        # in case the destination is too generic (e.g.: 10/8)
        # will take very very long to determine all routes and
        # moreover will return a huge list
        raise CommandTimeoutException(
            'Too many routes returned! Please try with a longer prefix or a specific protocol!'
        )
    except RpcError as rpce:
        if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]:
            raise CommandErrorException(
                'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element']))
        raise CommandErrorException(rpce)
    except Exception as err:
        raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err))

    routes_items = routes_table.items()

    for route in routes_items:
        d = {}
        # next_hop = route[0]
        d = {elem[0]: elem[1] for elem in route[1]}
        destination = napalm_base.helpers.ip(d.pop('destination', ''))
        prefix_length = d.pop('prefix_length', 32)
        destination = '{d}/{p}'.format(
            d=destination,
            p=prefix_length
        )
        # missing boolean fields default to False
        d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None})
        as_path = d.get('as_path')
        if as_path is not None:
            # strip the "AS path:" prefix and origin markers
            d['as_path'] = as_path.split(' I ')[0]\
                .replace('AS path:', '')\
                .replace('I', '')\
                .strip()
            # to be sure that contains only AS Numbers
        if d.get('inactive_reason') is None:
            d['inactive_reason'] = u''
        route_protocol = d.get('protocol').lower()
        if protocol and protocol != route_protocol:
            continue
        communities = d.get('communities')
        if communities is not None and type(communities) is not list:
            d['communities'] = [communities]
        d_keys = list(d.keys())
        # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific
        all_protocol_attributes = {
            key: d.pop(key)
            for key in d_keys
            if key not in _COMMON_PROTOCOL_FIELDS_
        }
        protocol_attributes = {
            key: value for key, value in all_protocol_attributes.items()
            if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, [])
        }
        d['protocol_attributes'] = protocol_attributes
        if destination not in routes.keys():
            routes[destination] = []
        routes[destination].append(d)

    return routes
def get_snmp_information(self):
    """Return the SNMP configuration.

    The top-level entry of the config view carries the global SNMP
    fields; its nested 'communities_table' is expanded into the
    'community' sub-dict, with the authorization mode translated
    through ``C.SNMP_AUTHORIZATION_MODE_MAP``.
    """
    config_table = junos_views.junos_snmp_config_table(self.device)
    config_table.get()
    entries = config_table.items()
    if not entries:
        return {}

    # single top-level entry holding all global fields
    snmp_information = {}
    for field, value in entries[0][1]:
        snmp_information[py23_compat.text_type(field)] = value if value else ''

    snmp_information['community'] = {}
    communities_table = snmp_information.pop('communities_table')
    if not communities_table:
        return snmp_information

    for name, fields in communities_table.items():
        details = {'acl': ''}
        for field, value in fields:
            if field == 'mode':
                value = C.SNMP_AUTHORIZATION_MODE_MAP.get(value)
            details[py23_compat.text_type(field)] = py23_compat.text_type(value)
        snmp_information['community'][py23_compat.text_type(name)] = details

    return snmp_information
def get_probes_config(self):
    """Return the configuration of the RPM probes.

    :return: dict ``{probe_name: {test_name: {probe_type, target,
        source, probe_count, test_interval}}}``.
    """
    probes = {}
    config_table = junos_views.junos_rpm_probes_config_table(self.device)
    config_table.get()
    for test, fields in config_table.items():
        test_name = py23_compat.text_type(test)
        details = dict(fields)
        probe_name = napalm_base.helpers.convert(
            py23_compat.text_type, details.pop('probe_name'))
        probes.setdefault(probe_name, {})[test_name] = {
            'probe_type': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('probe_type', '')),
            'target': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('target', '')),
            'source': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('source_address', '')),
            'probe_count': napalm_base.helpers.convert(
                int, details.pop('probe_count', '0')),
            'test_interval': napalm_base.helpers.convert(
                int, details.pop('test_interval', '0'))
        }
    return probes
def get_probes_results(self):
    """Return the results of the RPM probes.

    :return: dict ``{probe_name: {test_name: results}}`` where timing
        fields are converted from microseconds to milliseconds.
    """
    probes_results = {}
    results_table = junos_views.junos_rpm_probes_results_table(self.device)
    results_table.get()
    for probe, fields in results_table.items():
        probe_name = py23_compat.text_type(probe)
        results = dict(fields)
        results['last_test_loss'] = napalm_base.helpers.convert(
            int, results.pop('last_test_loss'), 0)
        # timing values come back in microseconds; scale to milliseconds
        for field in results:
            if isinstance(results[field], float):
                results[field] = results[field] * 1e-3
        test_name = results.pop('test_name', '')
        if results.get('source', u'') is None:
            results['source'] = u''
        probes_results.setdefault(probe_name, {})[test_name] = results
    return probes_results
def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL,
         timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF):
    """Execute ping on the device and return the parsed results.

    :param destination: host or IP address to ping.
    :param source: source address of the echo requests (optional).
    :param ttl: maximum number of hops (optional).
    :param timeout: seconds to wait for each reply (optional).
    :param size: payload size of each probe, in bytes (optional).
    :param count: number of probes to send (optional).
    :param vrf: routing instance to ping from (optional).
    :return: ``{'success': {...}}`` with RTT statistics (in ms) and
        per-probe results, or ``{'error': ...}`` when the RPC fails or
        all probes are lost.
    """
    ping_dict = {}

    # build the optional pieces of the CLI command
    source_str = ''
    maxttl_str = ''
    timeout_str = ''
    size_str = ''
    count_str = ''
    vrf_str = ''

    if source:
        source_str = ' source {source}'.format(source=source)
    if ttl:
        maxttl_str = ' ttl {ttl}'.format(ttl=ttl)
    if timeout:
        timeout_str = ' wait {timeout}'.format(timeout=timeout)
    if size:
        size_str = ' size {size}'.format(size=size)
    if count:
        count_str = ' count {count}'.format(count=count)
    if vrf:
        vrf_str = ' routing-instance {vrf}'.format(vrf=vrf)
    ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format(
        destination=destination,
        source=source_str,
        ttl=maxttl_str,
        timeout=timeout_str,
        size=size_str,
        count=count_str,
        vrf=vrf_str
    )

    ping_rpc = E('command', ping_command)
    rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc
    # make direct RPC call via NETCONF
    probe_summary = rpc_reply.find('.//probe-results-summary')
    if probe_summary is None:
        rpc_error = rpc_reply.find('.//rpc-error')
        return {'error': '{}'.format(
            napalm_base.helpers.find_txt(rpc_error, 'error-message'))}
    packet_loss = napalm_base.helpers.convert(
        int, napalm_base.helpers.find_txt(probe_summary, 'packet-loss'), 100)

    # rtt values are valid only if we got at least one ICMP reply.
    # NOTE: `!=` instead of the former `is not` — comparing an int
    # literal by identity only worked by CPython small-int caching.
    if packet_loss != 100:
        ping_dict['success'] = {}
        ping_dict['success']['probes_sent'] = int(
            probe_summary.findtext("probes-sent"))
        ping_dict['success']['packet_loss'] = packet_loss
        # device reports RTTs in microseconds; convert to milliseconds
        ping_dict['success'].update({
            'rtt_min': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-minimum'), -1) * 1e-3), 3),
            'rtt_max': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-maximum'), -1) * 1e-3), 3),
            'rtt_avg': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-average'), -1) * 1e-3), 3),
            'rtt_stddev': round((napalm_base.helpers.convert(
                float, napalm_base.helpers.find_txt(
                    probe_summary, 'rtt-stddev'), -1) * 1e-3), 3)
        })
        tmp = rpc_reply.find('.//ping-results')

        results_array = []
        for probe_result in tmp.findall('probe-result'):
            ip_address = napalm_base.helpers.convert(
                napalm_base.helpers.ip,
                napalm_base.helpers.find_txt(probe_result, 'ip-address'), '*')
            rtt = round(
                (napalm_base.helpers.convert(
                    float, napalm_base.helpers.find_txt(
                        probe_result, 'rtt'), -1) * 1e-3), 3)
            results_array.append({'ip_address': ip_address,
                                  'rtt': rtt})
        ping_dict['success'].update({'results': results_array})
    else:
        return {'error': 'Packet loss {}'.format(packet_loss)}
    return ping_dict
def get_users(self):
    """Return the configuration of the local users.

    Each user is reported with a Cisco-style privilege 'level' derived
    from the JunOS login class, the configured 'password' and the list
    of configured 'sshkeys'.
    """
    # JunOS login classes mapped onto Cisco-style privilege levels
    class_to_level = {
        'super-user': 15,
        'superuser': 15,
        'operator': 5,
        'read-only': 1,
        'unauthorized': 0
    }
    users = {}
    users_table = junos_views.junos_users_table(self.device)
    users_table.get()
    for username, fields in users_table.items():
        details = {
            'level': 0,
            'password': '',
            'sshkeys': []
        }
        details.update({name: value for name, value in fields if value})
        user_class = details.pop('class', '')
        # stringify every remaining field; 'level' and 'sshkeys' are
        # rebuilt with their proper types right after
        details = {
            field: py23_compat.text_type(details[field])
            for field in details.keys()
        }
        details['level'] = class_to_level.get(user_class, 0)
        details['sshkeys'] = [
            details.pop(key)
            for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa']
            if details.get(key, '')
        ]
        users[username] = details
    return users
def get_optics(self):
    """Return optics information.

    :return: dict keyed by interface name; each value holds
        ``physical_channels.channel``, a list of per-lane dicts with
        instantaneous input/output power and laser bias current
        (avg/min/max are always 0.0 — the device does not report them).
    """
    optics_table = junos_views.junos_intf_optics_table(self.device)
    optics_table.get()
    optics_items = optics_table.items()

    # optics_items has no lane information, so we need to re-format data
    # inserting lane 0 for all optics. Note it contains all optics 10G/40G/100G
    # but the information for 40G/100G is incorrect at this point
    # Example: intf_optic item is now: ('xe-0/0/0', [ optical_values ])
    optics_items_with_lane = []
    for intf_optic_item in optics_items:
        temp_list = list(intf_optic_item)
        temp_list.insert(1, u"0")
        new_intf_optic_item = tuple(temp_list)
        optics_items_with_lane.append(new_intf_optic_item)

    # Now optics_items_with_lane has all optics with lane 0 included
    # Example: ('xe-0/0/0', u'0', [ optical_values ])

    # Get optical information for 40G/100G optics
    optics_table40G = junos_views.junos_intf_40Goptics_table(self.device)
    optics_table40G.get()
    optics_40Gitems = optics_table40G.items()

    # Re-format data as before inserting lane value
    new_optics_40Gitems = []
    for item in optics_40Gitems:
        lane = item[0]
        iface = item[1].pop(0)
        new_optics_40Gitems.append((iface[1], py23_compat.text_type(lane), item[1]))

    # New_optics_40Gitems contains 40G/100G optics only:
    # ('et-0/0/49', u'0', [ optical_values ]),
    # ('et-0/0/49', u'1', [ optical_values ]),
    # ('et-0/0/49', u'2', [ optical_values ])

    # Remove 40G/100G optics entries with wrong information returned
    # from junos_intf_optics_table()
    iface_40G = [item[0] for item in new_optics_40Gitems]
    for intf_optic_item in optics_items_with_lane:
        iface_name = intf_optic_item[0]
        if iface_name not in iface_40G:
            new_optics_40Gitems.append(intf_optic_item)

    # New_optics_40Gitems contains all optics 10G/40G/100G with the lane
    optics_detail = {}
    for intf_optic_item in new_optics_40Gitems:
        lane = intf_optic_item[1]
        interface_name = py23_compat.text_type(intf_optic_item[0])
        optics = dict(intf_optic_item[2])
        if interface_name not in optics_detail:
            optics_detail[interface_name] = {}
            optics_detail[interface_name]['physical_channels'] = {}
            optics_detail[interface_name]['physical_channels']['channel'] = []

        # Defaulting avg, min, max values to 0.0 since device does not
        # return these values
        intf_optics = {
            'index': int(lane),
            'state': {
                'input_power': {
                    'instant': (
                        float(optics['input_power'])
                        if optics['input_power'] not in
                            [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'output_power': {
                    'instant': (
                        float(optics['output_power'])
                        if optics['output_power'] not in
                            [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                },
                'laser_bias_current': {
                    'instant': (
                        float(optics['laser_bias_current'])
                        if optics['laser_bias_current'] not in
                            [None, C.OPTICS_NULL_LEVEL]
                        else 0.0),
                    'avg': 0.0,
                    'max': 0.0,
                    'min': 0.0
                }
            }
        }
        optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics)

    return optics_detail
def get_config(self, retrieve='all'):
    """Return the device configuration as text.

    :param retrieve: which configuration(s) to fetch: 'running',
        'candidate' or 'all'.
    :return: dict with 'startup', 'running' and 'candidate' keys;
        'startup' is always returned empty.
    """
    result = {
        'startup': '',
        'running': '',
        'candidate': ''
    }
    get_opts = {
        'format': 'text',
        'database': 'candidate'
    }
    if retrieve in ('candidate', 'all'):
        cfg = self.device.rpc.get_config(filter_xml=None, options=get_opts)
        result['candidate'] = py23_compat.text_type(cfg.text)
    if retrieve in ('running', 'all'):
        # same RPC against the committed database
        get_opts['database'] = 'committed'
        cfg = self.device.rpc.get_config(filter_xml=None, options=get_opts)
        result['running'] = py23_compat.text_type(cfg.text)
    return result
def get_network_instances(self, name=''):
    """Return the network (routing) instances configured on the device.

    :param name: return only this instance (optional).
    :return: dict keyed by instance name with OpenConfig-style 'type',
        'state.route_distinguisher' and 'interfaces.interface' members.
        A synthetic 'default' instance collects every interface not
        bound to an explicit instance.
    """
    network_instances = {}

    ri_table = junos_views.junos_nw_instances_table(self.device)
    ri_table.get()
    ri_entries = ri_table.items()

    vrf_interfaces = []

    for ri_entry in ri_entries:
        ri_name = py23_compat.text_type(ri_entry[0])
        ri_details = {
            d[0]: d[1] for d in ri_entry[1]
        }
        ri_type = ri_details['instance_type']
        if ri_type is None:
            ri_type = 'default'
        ri_rd = ri_details['route_distinguisher']
        ri_interfaces = ri_details['interfaces']
        # a single bound interface comes back as a scalar
        if not isinstance(ri_interfaces, list):
            ri_interfaces = [ri_interfaces]

        network_instances[ri_name] = {
            'name': ri_name,
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type),  # default: return raw
            'state': {
                'route_distinguisher': ri_rd if ri_rd else ''
            },
            'interfaces': {
                'interface': {
                    intrf_name: {} for intrf_name in ri_interfaces if intrf_name
                }
            }
        }
        vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys())

    # everything not claimed by an explicit instance goes to 'default'
    all_interfaces = self.get_interfaces().keys()
    default_interfaces = list(set(all_interfaces) - set(vrf_interfaces))

    if 'default' not in network_instances:
        network_instances['default'] = {
            'name': 'default',
            'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'),
            'state': {
                'route_distinguisher': ''
            },
            'interfaces': {
                'interface': {
                    py23_compat.text_type(intrf_name): {}
                    for intrf_name in default_interfaces
                }
            }
        }

    if not name:
        return network_instances
    if name not in network_instances:
        return {}
    return {name: network_instances[name]}
|
fkmclane/python-ardrone | ardrone/drone.py | ARDrone.move_left | python | def move_left(self):
self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0) | Make the drone move left. | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L55-L57 | [
"def at(self, cmd, *args, **kwargs):\n \"\"\"Wrapper for the low level at commands.\n\n This method takes care that the sequence number is increased after each\n at command and the watchdog timer is started to make sure the drone\n receives a command at least every second.\n \"\"\"\n with self.lock:\n self.com_watchdog_timer.cancel()\n cmd(self.host, self.sequence, *args, **kwargs)\n self.sequence += 1\n self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)\n self.com_watchdog_timer.start()\n"
] | class ARDrone(object):
"""ARDrone Class.
Instantiate this class to control your drone and receive decoded video and
navdata.
"""
def __init__(self, host='192.168.1.1'):
self.host = host
self.sequence = 1
self.timer = 0.2
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.lock = threading.Lock()
self.speed = 0.2
self.at(ardrone.at.config, 'general:navdata_demo', 'TRUE')
self.at(ardrone.at.config, 'control:altitude_max', '20000')
self.video_pipe, video_pipe_other = multiprocessing.Pipe()
self.nav_pipe, nav_pipe_other = multiprocessing.Pipe()
self.com_pipe, com_pipe_other = multiprocessing.Pipe()
self.network_process = ardrone.network.ARDroneNetworkProcess(self.host, nav_pipe_other, video_pipe_other, com_pipe_other)
self.network_process.start()
self.ipc_thread = ardrone.network.IPCThread(self)
self.ipc_thread.start()
self.image = PIL.Image.new('RGB', (640, 360))
self.navdata = dict()
self.time = 0
def takeoff(self):
"""Make the drone takeoff."""
self.at(ardrone.at.ref, True)
def land(self):
"""Make the drone land."""
self.at(ardrone.at.ref, False)
def hover(self):
"""Make the drone hover."""
self.at(ardrone.at.pcmd, False, 0, 0, 0, 0)
def move_right(self):
"""Make the drone move right."""
self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0)
def move_up(self):
"""Make the drone rise upwards."""
self.at(ardrone.at.pcmd, True, 0, 0, self.speed, 0)
def move_down(self):
"""Make the drone decent downwards."""
self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0)
def move_forward(self):
"""Make the drone move forward."""
self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0)
def move_backward(self):
"""Make the drone move backwards."""
self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0)
def turn_left(self):
"""Make the drone rotate left."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed)
def turn_right(self):
"""Make the drone rotate right."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, self.speed)
def reset(self):
"""Toggle the drone's emergency state."""
self.at(ardrone.at.ref, False, True)
time.sleep(0.1)
self.at(ardrone.at.ref, False, False)
def trim(self):
"""Flat trim the drone."""
self.at(ardrone.at.ftrim)
def set_cam(self, cam):
"""Set active camera.
Valid values are 0 for the front camera and 1 for the bottom camera
"""
self.at(ardrone.at.config, 'video:video_channel', cam)
def set_speed(self, speed):
"""Set the drone's speed.
Valid values are floats from [0..1]
"""
self.speed = speed
def at(self, cmd, *args, **kwargs):
"""Wrapper for the low level at commands.
This method takes care that the sequence number is increased after each
at command and the watchdog timer is started to make sure the drone
receives a command at least every second.
"""
with self.lock:
self.com_watchdog_timer.cancel()
cmd(self.host, self.sequence, *args, **kwargs)
self.sequence += 1
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.com_watchdog_timer.start()
def commwdg(self):
"""Communication watchdog signal.
This needs to be sent regularly to keep the communication
with the drone alive.
"""
self.at(ardrone.at.comwdg)
def halt(self):
"""Shutdown the drone.
This method does not land or halt the actual drone, but the
communication with the drone. You should call it at the end of your
application to close all sockets, pipes, processes and threads related
with this object.
"""
with self.lock:
self.com_watchdog_timer.cancel()
self.ipc_thread.stop()
self.ipc_thread.join()
self.network_process.terminate()
self.network_process.join()
def move(self, lr, fb, vv, va):
"""Makes the drone move (translate/rotate).
Parameters:
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right"""
self.at(ardrone.at.pcmd, True, lr, fb, vv, va)
|
fkmclane/python-ardrone | ardrone/drone.py | ARDrone.move_right | python | def move_right(self):
self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0) | Make the drone move right. | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L59-L61 | [
"def at(self, cmd, *args, **kwargs):\n \"\"\"Wrapper for the low level at commands.\n\n This method takes care that the sequence number is increased after each\n at command and the watchdog timer is started to make sure the drone\n receives a command at least every second.\n \"\"\"\n with self.lock:\n self.com_watchdog_timer.cancel()\n cmd(self.host, self.sequence, *args, **kwargs)\n self.sequence += 1\n self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)\n self.com_watchdog_timer.start()\n"
] | class ARDrone(object):
"""ARDrone Class.
Instantiate this class to control your drone and receive decoded video and
navdata.
"""
def __init__(self, host='192.168.1.1'):
self.host = host
self.sequence = 1
self.timer = 0.2
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.lock = threading.Lock()
self.speed = 0.2
self.at(ardrone.at.config, 'general:navdata_demo', 'TRUE')
self.at(ardrone.at.config, 'control:altitude_max', '20000')
self.video_pipe, video_pipe_other = multiprocessing.Pipe()
self.nav_pipe, nav_pipe_other = multiprocessing.Pipe()
self.com_pipe, com_pipe_other = multiprocessing.Pipe()
self.network_process = ardrone.network.ARDroneNetworkProcess(self.host, nav_pipe_other, video_pipe_other, com_pipe_other)
self.network_process.start()
self.ipc_thread = ardrone.network.IPCThread(self)
self.ipc_thread.start()
self.image = PIL.Image.new('RGB', (640, 360))
self.navdata = dict()
self.time = 0
def takeoff(self):
"""Make the drone takeoff."""
self.at(ardrone.at.ref, True)
def land(self):
"""Make the drone land."""
self.at(ardrone.at.ref, False)
def hover(self):
"""Make the drone hover."""
self.at(ardrone.at.pcmd, False, 0, 0, 0, 0)
def move_left(self):
"""Make the drone move left."""
self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0)
def move_up(self):
"""Make the drone rise upwards."""
self.at(ardrone.at.pcmd, True, 0, 0, self.speed, 0)
def move_down(self):
"""Make the drone decent downwards."""
self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0)
def move_forward(self):
"""Make the drone move forward."""
self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0)
def move_backward(self):
"""Make the drone move backwards."""
self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0)
def turn_left(self):
"""Make the drone rotate left."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed)
def turn_right(self):
"""Make the drone rotate right."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, self.speed)
def reset(self):
"""Toggle the drone's emergency state."""
self.at(ardrone.at.ref, False, True)
time.sleep(0.1)
self.at(ardrone.at.ref, False, False)
def trim(self):
"""Flat trim the drone."""
self.at(ardrone.at.ftrim)
def set_cam(self, cam):
"""Set active camera.
Valid values are 0 for the front camera and 1 for the bottom camera
"""
self.at(ardrone.at.config, 'video:video_channel', cam)
def set_speed(self, speed):
"""Set the drone's speed.
Valid values are floats from [0..1]
"""
self.speed = speed
def at(self, cmd, *args, **kwargs):
"""Wrapper for the low level at commands.
This method takes care that the sequence number is increased after each
at command and the watchdog timer is started to make sure the drone
receives a command at least every second.
"""
with self.lock:
self.com_watchdog_timer.cancel()
cmd(self.host, self.sequence, *args, **kwargs)
self.sequence += 1
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.com_watchdog_timer.start()
def commwdg(self):
"""Communication watchdog signal.
This needs to be sent regularly to keep the communication
with the drone alive.
"""
self.at(ardrone.at.comwdg)
def halt(self):
"""Shutdown the drone.
This method does not land or halt the actual drone, but the
communication with the drone. You should call it at the end of your
application to close all sockets, pipes, processes and threads related
with this object.
"""
with self.lock:
self.com_watchdog_timer.cancel()
self.ipc_thread.stop()
self.ipc_thread.join()
self.network_process.terminate()
self.network_process.join()
def move(self, lr, fb, vv, va):
"""Makes the drone move (translate/rotate).
Parameters:
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right"""
self.at(ardrone.at.pcmd, True, lr, fb, vv, va)
|
fkmclane/python-ardrone | ardrone/drone.py | ARDrone.move_up | python | def move_up(self):
self.at(ardrone.at.pcmd, True, 0, 0, self.speed, 0) | Make the drone rise upwards. | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L63-L65 | [
"def at(self, cmd, *args, **kwargs):\n \"\"\"Wrapper for the low level at commands.\n\n This method takes care that the sequence number is increased after each\n at command and the watchdog timer is started to make sure the drone\n receives a command at least every second.\n \"\"\"\n with self.lock:\n self.com_watchdog_timer.cancel()\n cmd(self.host, self.sequence, *args, **kwargs)\n self.sequence += 1\n self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)\n self.com_watchdog_timer.start()\n"
] | class ARDrone(object):
"""ARDrone Class.
Instantiate this class to control your drone and receive decoded video and
navdata.
"""
def __init__(self, host='192.168.1.1'):
self.host = host
self.sequence = 1
self.timer = 0.2
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.lock = threading.Lock()
self.speed = 0.2
self.at(ardrone.at.config, 'general:navdata_demo', 'TRUE')
self.at(ardrone.at.config, 'control:altitude_max', '20000')
self.video_pipe, video_pipe_other = multiprocessing.Pipe()
self.nav_pipe, nav_pipe_other = multiprocessing.Pipe()
self.com_pipe, com_pipe_other = multiprocessing.Pipe()
self.network_process = ardrone.network.ARDroneNetworkProcess(self.host, nav_pipe_other, video_pipe_other, com_pipe_other)
self.network_process.start()
self.ipc_thread = ardrone.network.IPCThread(self)
self.ipc_thread.start()
self.image = PIL.Image.new('RGB', (640, 360))
self.navdata = dict()
self.time = 0
def takeoff(self):
"""Make the drone takeoff."""
self.at(ardrone.at.ref, True)
def land(self):
"""Make the drone land."""
self.at(ardrone.at.ref, False)
def hover(self):
"""Make the drone hover."""
self.at(ardrone.at.pcmd, False, 0, 0, 0, 0)
def move_left(self):
"""Make the drone move left."""
self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0)
def move_right(self):
"""Make the drone move right."""
self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0)
def move_down(self):
"""Make the drone decent downwards."""
self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0)
def move_forward(self):
"""Make the drone move forward."""
self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0)
def move_backward(self):
"""Make the drone move backwards."""
self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0)
def turn_left(self):
"""Make the drone rotate left."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed)
def turn_right(self):
"""Make the drone rotate right."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, self.speed)
def reset(self):
"""Toggle the drone's emergency state."""
self.at(ardrone.at.ref, False, True)
time.sleep(0.1)
self.at(ardrone.at.ref, False, False)
def trim(self):
"""Flat trim the drone."""
self.at(ardrone.at.ftrim)
def set_cam(self, cam):
"""Set active camera.
Valid values are 0 for the front camera and 1 for the bottom camera
"""
self.at(ardrone.at.config, 'video:video_channel', cam)
def set_speed(self, speed):
"""Set the drone's speed.
Valid values are floats from [0..1]
"""
self.speed = speed
def at(self, cmd, *args, **kwargs):
"""Wrapper for the low level at commands.
This method takes care that the sequence number is increased after each
at command and the watchdog timer is started to make sure the drone
receives a command at least every second.
"""
with self.lock:
self.com_watchdog_timer.cancel()
cmd(self.host, self.sequence, *args, **kwargs)
self.sequence += 1
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.com_watchdog_timer.start()
def commwdg(self):
"""Communication watchdog signal.
This needs to be sent regularly to keep the communication
with the drone alive.
"""
self.at(ardrone.at.comwdg)
def halt(self):
"""Shutdown the drone.
This method does not land or halt the actual drone, but the
communication with the drone. You should call it at the end of your
application to close all sockets, pipes, processes and threads related
with this object.
"""
with self.lock:
self.com_watchdog_timer.cancel()
self.ipc_thread.stop()
self.ipc_thread.join()
self.network_process.terminate()
self.network_process.join()
def move(self, lr, fb, vv, va):
"""Makes the drone move (translate/rotate).
Parameters:
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right"""
self.at(ardrone.at.pcmd, True, lr, fb, vv, va)
|
fkmclane/python-ardrone | ardrone/drone.py | ARDrone.move_down | python | def move_down(self):
self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0) | Make the drone decent downwards. | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L67-L69 | [
"def at(self, cmd, *args, **kwargs):\n \"\"\"Wrapper for the low level at commands.\n\n This method takes care that the sequence number is increased after each\n at command and the watchdog timer is started to make sure the drone\n receives a command at least every second.\n \"\"\"\n with self.lock:\n self.com_watchdog_timer.cancel()\n cmd(self.host, self.sequence, *args, **kwargs)\n self.sequence += 1\n self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)\n self.com_watchdog_timer.start()\n"
] | class ARDrone(object):
"""ARDrone Class.
Instantiate this class to control your drone and receive decoded video and
navdata.
"""
def __init__(self, host='192.168.1.1'):
self.host = host
self.sequence = 1
self.timer = 0.2
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.lock = threading.Lock()
self.speed = 0.2
self.at(ardrone.at.config, 'general:navdata_demo', 'TRUE')
self.at(ardrone.at.config, 'control:altitude_max', '20000')
self.video_pipe, video_pipe_other = multiprocessing.Pipe()
self.nav_pipe, nav_pipe_other = multiprocessing.Pipe()
self.com_pipe, com_pipe_other = multiprocessing.Pipe()
self.network_process = ardrone.network.ARDroneNetworkProcess(self.host, nav_pipe_other, video_pipe_other, com_pipe_other)
self.network_process.start()
self.ipc_thread = ardrone.network.IPCThread(self)
self.ipc_thread.start()
self.image = PIL.Image.new('RGB', (640, 360))
self.navdata = dict()
self.time = 0
def takeoff(self):
"""Make the drone takeoff."""
self.at(ardrone.at.ref, True)
def land(self):
"""Make the drone land."""
self.at(ardrone.at.ref, False)
def hover(self):
"""Make the drone hover."""
self.at(ardrone.at.pcmd, False, 0, 0, 0, 0)
def move_left(self):
"""Make the drone move left."""
self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0)
def move_right(self):
"""Make the drone move right."""
self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0)
def move_up(self):
"""Make the drone rise upwards."""
self.at(ardrone.at.pcmd, True, 0, 0, self.speed, 0)
def move_forward(self):
"""Make the drone move forward."""
self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0)
def move_backward(self):
"""Make the drone move backwards."""
self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0)
def turn_left(self):
"""Make the drone rotate left."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed)
def turn_right(self):
"""Make the drone rotate right."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, self.speed)
def reset(self):
"""Toggle the drone's emergency state."""
self.at(ardrone.at.ref, False, True)
time.sleep(0.1)
self.at(ardrone.at.ref, False, False)
def trim(self):
"""Flat trim the drone."""
self.at(ardrone.at.ftrim)
def set_cam(self, cam):
"""Set active camera.
Valid values are 0 for the front camera and 1 for the bottom camera
"""
self.at(ardrone.at.config, 'video:video_channel', cam)
def set_speed(self, speed):
"""Set the drone's speed.
Valid values are floats from [0..1]
"""
self.speed = speed
def at(self, cmd, *args, **kwargs):
"""Wrapper for the low level at commands.
This method takes care that the sequence number is increased after each
at command and the watchdog timer is started to make sure the drone
receives a command at least every second.
"""
with self.lock:
self.com_watchdog_timer.cancel()
cmd(self.host, self.sequence, *args, **kwargs)
self.sequence += 1
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.com_watchdog_timer.start()
def commwdg(self):
"""Communication watchdog signal.
This needs to be sent regularly to keep the communication
with the drone alive.
"""
self.at(ardrone.at.comwdg)
def halt(self):
"""Shutdown the drone.
This method does not land or halt the actual drone, but the
communication with the drone. You should call it at the end of your
application to close all sockets, pipes, processes and threads related
with this object.
"""
with self.lock:
self.com_watchdog_timer.cancel()
self.ipc_thread.stop()
self.ipc_thread.join()
self.network_process.terminate()
self.network_process.join()
def move(self, lr, fb, vv, va):
"""Makes the drone move (translate/rotate).
Parameters:
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right"""
self.at(ardrone.at.pcmd, True, lr, fb, vv, va)
|
fkmclane/python-ardrone | ardrone/drone.py | ARDrone.move_forward | python | def move_forward(self):
self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0) | Make the drone move forward. | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L71-L73 | [
"def at(self, cmd, *args, **kwargs):\n \"\"\"Wrapper for the low level at commands.\n\n This method takes care that the sequence number is increased after each\n at command and the watchdog timer is started to make sure the drone\n receives a command at least every second.\n \"\"\"\n with self.lock:\n self.com_watchdog_timer.cancel()\n cmd(self.host, self.sequence, *args, **kwargs)\n self.sequence += 1\n self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)\n self.com_watchdog_timer.start()\n"
] | class ARDrone(object):
"""ARDrone Class.
Instantiate this class to control your drone and receive decoded video and
navdata.
"""
def __init__(self, host='192.168.1.1'):
self.host = host
self.sequence = 1
self.timer = 0.2
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.lock = threading.Lock()
self.speed = 0.2
self.at(ardrone.at.config, 'general:navdata_demo', 'TRUE')
self.at(ardrone.at.config, 'control:altitude_max', '20000')
self.video_pipe, video_pipe_other = multiprocessing.Pipe()
self.nav_pipe, nav_pipe_other = multiprocessing.Pipe()
self.com_pipe, com_pipe_other = multiprocessing.Pipe()
self.network_process = ardrone.network.ARDroneNetworkProcess(self.host, nav_pipe_other, video_pipe_other, com_pipe_other)
self.network_process.start()
self.ipc_thread = ardrone.network.IPCThread(self)
self.ipc_thread.start()
self.image = PIL.Image.new('RGB', (640, 360))
self.navdata = dict()
self.time = 0
def takeoff(self):
"""Make the drone takeoff."""
self.at(ardrone.at.ref, True)
def land(self):
"""Make the drone land."""
self.at(ardrone.at.ref, False)
def hover(self):
"""Make the drone hover."""
self.at(ardrone.at.pcmd, False, 0, 0, 0, 0)
def move_left(self):
"""Make the drone move left."""
self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0)
def move_right(self):
"""Make the drone move right."""
self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0)
def move_up(self):
"""Make the drone rise upwards."""
self.at(ardrone.at.pcmd, True, 0, 0, self.speed, 0)
def move_down(self):
"""Make the drone decent downwards."""
self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0)
def move_backward(self):
"""Make the drone move backwards."""
self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0)
def turn_left(self):
"""Make the drone rotate left."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed)
def turn_right(self):
"""Make the drone rotate right."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, self.speed)
def reset(self):
"""Toggle the drone's emergency state."""
self.at(ardrone.at.ref, False, True)
time.sleep(0.1)
self.at(ardrone.at.ref, False, False)
def trim(self):
"""Flat trim the drone."""
self.at(ardrone.at.ftrim)
def set_cam(self, cam):
"""Set active camera.
Valid values are 0 for the front camera and 1 for the bottom camera
"""
self.at(ardrone.at.config, 'video:video_channel', cam)
def set_speed(self, speed):
"""Set the drone's speed.
Valid values are floats from [0..1]
"""
self.speed = speed
def at(self, cmd, *args, **kwargs):
"""Wrapper for the low level at commands.
This method takes care that the sequence number is increased after each
at command and the watchdog timer is started to make sure the drone
receives a command at least every second.
"""
with self.lock:
self.com_watchdog_timer.cancel()
cmd(self.host, self.sequence, *args, **kwargs)
self.sequence += 1
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.com_watchdog_timer.start()
def commwdg(self):
"""Communication watchdog signal.
This needs to be sent regularly to keep the communication
with the drone alive.
"""
self.at(ardrone.at.comwdg)
def halt(self):
"""Shutdown the drone.
This method does not land or halt the actual drone, but the
communication with the drone. You should call it at the end of your
application to close all sockets, pipes, processes and threads related
with this object.
"""
with self.lock:
self.com_watchdog_timer.cancel()
self.ipc_thread.stop()
self.ipc_thread.join()
self.network_process.terminate()
self.network_process.join()
def move(self, lr, fb, vv, va):
"""Makes the drone move (translate/rotate).
Parameters:
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right"""
self.at(ardrone.at.pcmd, True, lr, fb, vv, va)
|
fkmclane/python-ardrone | ardrone/drone.py | ARDrone.move_backward | python | def move_backward(self):
self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0) | Make the drone move backwards. | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L75-L77 | [
"def at(self, cmd, *args, **kwargs):\n \"\"\"Wrapper for the low level at commands.\n\n This method takes care that the sequence number is increased after each\n at command and the watchdog timer is started to make sure the drone\n receives a command at least every second.\n \"\"\"\n with self.lock:\n self.com_watchdog_timer.cancel()\n cmd(self.host, self.sequence, *args, **kwargs)\n self.sequence += 1\n self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)\n self.com_watchdog_timer.start()\n"
] | class ARDrone(object):
"""ARDrone Class.
Instantiate this class to control your drone and receive decoded video and
navdata.
"""
def __init__(self, host='192.168.1.1'):
self.host = host
self.sequence = 1
self.timer = 0.2
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.lock = threading.Lock()
self.speed = 0.2
self.at(ardrone.at.config, 'general:navdata_demo', 'TRUE')
self.at(ardrone.at.config, 'control:altitude_max', '20000')
self.video_pipe, video_pipe_other = multiprocessing.Pipe()
self.nav_pipe, nav_pipe_other = multiprocessing.Pipe()
self.com_pipe, com_pipe_other = multiprocessing.Pipe()
self.network_process = ardrone.network.ARDroneNetworkProcess(self.host, nav_pipe_other, video_pipe_other, com_pipe_other)
self.network_process.start()
self.ipc_thread = ardrone.network.IPCThread(self)
self.ipc_thread.start()
self.image = PIL.Image.new('RGB', (640, 360))
self.navdata = dict()
self.time = 0
def takeoff(self):
"""Make the drone takeoff."""
self.at(ardrone.at.ref, True)
def land(self):
"""Make the drone land."""
self.at(ardrone.at.ref, False)
def hover(self):
"""Make the drone hover."""
self.at(ardrone.at.pcmd, False, 0, 0, 0, 0)
def move_left(self):
"""Make the drone move left."""
self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0)
def move_right(self):
"""Make the drone move right."""
self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0)
def move_up(self):
"""Make the drone rise upwards."""
self.at(ardrone.at.pcmd, True, 0, 0, self.speed, 0)
def move_down(self):
"""Make the drone decent downwards."""
self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0)
def move_forward(self):
"""Make the drone move forward."""
self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0)
def turn_left(self):
"""Make the drone rotate left."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed)
def turn_right(self):
"""Make the drone rotate right."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, self.speed)
def reset(self):
"""Toggle the drone's emergency state."""
self.at(ardrone.at.ref, False, True)
time.sleep(0.1)
self.at(ardrone.at.ref, False, False)
def trim(self):
"""Flat trim the drone."""
self.at(ardrone.at.ftrim)
def set_cam(self, cam):
"""Set active camera.
Valid values are 0 for the front camera and 1 for the bottom camera
"""
self.at(ardrone.at.config, 'video:video_channel', cam)
def set_speed(self, speed):
"""Set the drone's speed.
Valid values are floats from [0..1]
"""
self.speed = speed
def at(self, cmd, *args, **kwargs):
"""Wrapper for the low level at commands.
This method takes care that the sequence number is increased after each
at command and the watchdog timer is started to make sure the drone
receives a command at least every second.
"""
with self.lock:
self.com_watchdog_timer.cancel()
cmd(self.host, self.sequence, *args, **kwargs)
self.sequence += 1
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.com_watchdog_timer.start()
def commwdg(self):
"""Communication watchdog signal.
This needs to be sent regularly to keep the communication
with the drone alive.
"""
self.at(ardrone.at.comwdg)
def halt(self):
"""Shutdown the drone.
This method does not land or halt the actual drone, but the
communication with the drone. You should call it at the end of your
application to close all sockets, pipes, processes and threads related
with this object.
"""
with self.lock:
self.com_watchdog_timer.cancel()
self.ipc_thread.stop()
self.ipc_thread.join()
self.network_process.terminate()
self.network_process.join()
def move(self, lr, fb, vv, va):
"""Makes the drone move (translate/rotate).
Parameters:
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right"""
self.at(ardrone.at.pcmd, True, lr, fb, vv, va)
|
fkmclane/python-ardrone | ardrone/drone.py | ARDrone.turn_left | python | def turn_left(self):
self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed) | Make the drone rotate left. | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L79-L81 | [
"def at(self, cmd, *args, **kwargs):\n \"\"\"Wrapper for the low level at commands.\n\n This method takes care that the sequence number is increased after each\n at command and the watchdog timer is started to make sure the drone\n receives a command at least every second.\n \"\"\"\n with self.lock:\n self.com_watchdog_timer.cancel()\n cmd(self.host, self.sequence, *args, **kwargs)\n self.sequence += 1\n self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)\n self.com_watchdog_timer.start()\n"
] | class ARDrone(object):
"""ARDrone Class.
Instantiate this class to control your drone and receive decoded video and
navdata.
"""
def __init__(self, host='192.168.1.1'):
self.host = host
self.sequence = 1
self.timer = 0.2
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.lock = threading.Lock()
self.speed = 0.2
self.at(ardrone.at.config, 'general:navdata_demo', 'TRUE')
self.at(ardrone.at.config, 'control:altitude_max', '20000')
self.video_pipe, video_pipe_other = multiprocessing.Pipe()
self.nav_pipe, nav_pipe_other = multiprocessing.Pipe()
self.com_pipe, com_pipe_other = multiprocessing.Pipe()
self.network_process = ardrone.network.ARDroneNetworkProcess(self.host, nav_pipe_other, video_pipe_other, com_pipe_other)
self.network_process.start()
self.ipc_thread = ardrone.network.IPCThread(self)
self.ipc_thread.start()
self.image = PIL.Image.new('RGB', (640, 360))
self.navdata = dict()
self.time = 0
def takeoff(self):
"""Make the drone takeoff."""
self.at(ardrone.at.ref, True)
def land(self):
"""Make the drone land."""
self.at(ardrone.at.ref, False)
def hover(self):
"""Make the drone hover."""
self.at(ardrone.at.pcmd, False, 0, 0, 0, 0)
def move_left(self):
"""Make the drone move left."""
self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0)
def move_right(self):
"""Make the drone move right."""
self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0)
def move_up(self):
"""Make the drone rise upwards."""
self.at(ardrone.at.pcmd, True, 0, 0, self.speed, 0)
def move_down(self):
"""Make the drone decent downwards."""
self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0)
def move_forward(self):
"""Make the drone move forward."""
self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0)
def move_backward(self):
"""Make the drone move backwards."""
self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0)
def turn_right(self):
"""Make the drone rotate right."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, self.speed)
def reset(self):
"""Toggle the drone's emergency state."""
self.at(ardrone.at.ref, False, True)
time.sleep(0.1)
self.at(ardrone.at.ref, False, False)
def trim(self):
"""Flat trim the drone."""
self.at(ardrone.at.ftrim)
def set_cam(self, cam):
"""Set active camera.
Valid values are 0 for the front camera and 1 for the bottom camera
"""
self.at(ardrone.at.config, 'video:video_channel', cam)
def set_speed(self, speed):
"""Set the drone's speed.
Valid values are floats from [0..1]
"""
self.speed = speed
def at(self, cmd, *args, **kwargs):
"""Wrapper for the low level at commands.
This method takes care that the sequence number is increased after each
at command and the watchdog timer is started to make sure the drone
receives a command at least every second.
"""
with self.lock:
self.com_watchdog_timer.cancel()
cmd(self.host, self.sequence, *args, **kwargs)
self.sequence += 1
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.com_watchdog_timer.start()
def commwdg(self):
"""Communication watchdog signal.
This needs to be sent regularly to keep the communication
with the drone alive.
"""
self.at(ardrone.at.comwdg)
def halt(self):
"""Shutdown the drone.
This method does not land or halt the actual drone, but the
communication with the drone. You should call it at the end of your
application to close all sockets, pipes, processes and threads related
with this object.
"""
with self.lock:
self.com_watchdog_timer.cancel()
self.ipc_thread.stop()
self.ipc_thread.join()
self.network_process.terminate()
self.network_process.join()
def move(self, lr, fb, vv, va):
"""Makes the drone move (translate/rotate).
Parameters:
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right"""
self.at(ardrone.at.pcmd, True, lr, fb, vv, va)
|
fkmclane/python-ardrone | ardrone/drone.py | ARDrone.turn_right | python | def turn_right(self):
self.at(ardrone.at.pcmd, True, 0, 0, 0, self.speed) | Make the drone rotate right. | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L83-L85 | [
"def at(self, cmd, *args, **kwargs):\n \"\"\"Wrapper for the low level at commands.\n\n This method takes care that the sequence number is increased after each\n at command and the watchdog timer is started to make sure the drone\n receives a command at least every second.\n \"\"\"\n with self.lock:\n self.com_watchdog_timer.cancel()\n cmd(self.host, self.sequence, *args, **kwargs)\n self.sequence += 1\n self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)\n self.com_watchdog_timer.start()\n"
] | class ARDrone(object):
"""ARDrone Class.
Instantiate this class to control your drone and receive decoded video and
navdata.
"""
def __init__(self, host='192.168.1.1'):
self.host = host
self.sequence = 1
self.timer = 0.2
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.lock = threading.Lock()
self.speed = 0.2
self.at(ardrone.at.config, 'general:navdata_demo', 'TRUE')
self.at(ardrone.at.config, 'control:altitude_max', '20000')
self.video_pipe, video_pipe_other = multiprocessing.Pipe()
self.nav_pipe, nav_pipe_other = multiprocessing.Pipe()
self.com_pipe, com_pipe_other = multiprocessing.Pipe()
self.network_process = ardrone.network.ARDroneNetworkProcess(self.host, nav_pipe_other, video_pipe_other, com_pipe_other)
self.network_process.start()
self.ipc_thread = ardrone.network.IPCThread(self)
self.ipc_thread.start()
self.image = PIL.Image.new('RGB', (640, 360))
self.navdata = dict()
self.time = 0
def takeoff(self):
"""Make the drone takeoff."""
self.at(ardrone.at.ref, True)
def land(self):
"""Make the drone land."""
self.at(ardrone.at.ref, False)
def hover(self):
"""Make the drone hover."""
self.at(ardrone.at.pcmd, False, 0, 0, 0, 0)
def move_left(self):
"""Make the drone move left."""
self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0)
def move_right(self):
"""Make the drone move right."""
self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0)
def move_up(self):
"""Make the drone rise upwards."""
self.at(ardrone.at.pcmd, True, 0, 0, self.speed, 0)
def move_down(self):
"""Make the drone decent downwards."""
self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0)
def move_forward(self):
"""Make the drone move forward."""
self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0)
def move_backward(self):
"""Make the drone move backwards."""
self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0)
def turn_left(self):
"""Make the drone rotate left."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed)
def reset(self):
"""Toggle the drone's emergency state."""
self.at(ardrone.at.ref, False, True)
time.sleep(0.1)
self.at(ardrone.at.ref, False, False)
def trim(self):
"""Flat trim the drone."""
self.at(ardrone.at.ftrim)
def set_cam(self, cam):
"""Set active camera.
Valid values are 0 for the front camera and 1 for the bottom camera
"""
self.at(ardrone.at.config, 'video:video_channel', cam)
def set_speed(self, speed):
"""Set the drone's speed.
Valid values are floats from [0..1]
"""
self.speed = speed
def at(self, cmd, *args, **kwargs):
"""Wrapper for the low level at commands.
This method takes care that the sequence number is increased after each
at command and the watchdog timer is started to make sure the drone
receives a command at least every second.
"""
with self.lock:
self.com_watchdog_timer.cancel()
cmd(self.host, self.sequence, *args, **kwargs)
self.sequence += 1
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.com_watchdog_timer.start()
def commwdg(self):
"""Communication watchdog signal.
This needs to be sent regularly to keep the communication
with the drone alive.
"""
self.at(ardrone.at.comwdg)
def halt(self):
"""Shutdown the drone.
This method does not land or halt the actual drone, but the
communication with the drone. You should call it at the end of your
application to close all sockets, pipes, processes and threads related
with this object.
"""
with self.lock:
self.com_watchdog_timer.cancel()
self.ipc_thread.stop()
self.ipc_thread.join()
self.network_process.terminate()
self.network_process.join()
def move(self, lr, fb, vv, va):
"""Makes the drone move (translate/rotate).
Parameters:
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right"""
self.at(ardrone.at.pcmd, True, lr, fb, vv, va)
|
fkmclane/python-ardrone | ardrone/drone.py | ARDrone.reset | python | def reset(self):
self.at(ardrone.at.ref, False, True)
time.sleep(0.1)
self.at(ardrone.at.ref, False, False) | Toggle the drone's emergency state. | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L87-L91 | [
"def at(self, cmd, *args, **kwargs):\n \"\"\"Wrapper for the low level at commands.\n\n This method takes care that the sequence number is increased after each\n at command and the watchdog timer is started to make sure the drone\n receives a command at least every second.\n \"\"\"\n with self.lock:\n self.com_watchdog_timer.cancel()\n cmd(self.host, self.sequence, *args, **kwargs)\n self.sequence += 1\n self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)\n self.com_watchdog_timer.start()\n"
] | class ARDrone(object):
"""ARDrone Class.
Instantiate this class to control your drone and receive decoded video and
navdata.
"""
def __init__(self, host='192.168.1.1'):
self.host = host
self.sequence = 1
self.timer = 0.2
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.lock = threading.Lock()
self.speed = 0.2
self.at(ardrone.at.config, 'general:navdata_demo', 'TRUE')
self.at(ardrone.at.config, 'control:altitude_max', '20000')
self.video_pipe, video_pipe_other = multiprocessing.Pipe()
self.nav_pipe, nav_pipe_other = multiprocessing.Pipe()
self.com_pipe, com_pipe_other = multiprocessing.Pipe()
self.network_process = ardrone.network.ARDroneNetworkProcess(self.host, nav_pipe_other, video_pipe_other, com_pipe_other)
self.network_process.start()
self.ipc_thread = ardrone.network.IPCThread(self)
self.ipc_thread.start()
self.image = PIL.Image.new('RGB', (640, 360))
self.navdata = dict()
self.time = 0
def takeoff(self):
"""Make the drone takeoff."""
self.at(ardrone.at.ref, True)
def land(self):
"""Make the drone land."""
self.at(ardrone.at.ref, False)
def hover(self):
"""Make the drone hover."""
self.at(ardrone.at.pcmd, False, 0, 0, 0, 0)
def move_left(self):
"""Make the drone move left."""
self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0)
def move_right(self):
"""Make the drone move right."""
self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0)
def move_up(self):
"""Make the drone rise upwards."""
self.at(ardrone.at.pcmd, True, 0, 0, self.speed, 0)
def move_down(self):
"""Make the drone decent downwards."""
self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0)
def move_forward(self):
"""Make the drone move forward."""
self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0)
def move_backward(self):
"""Make the drone move backwards."""
self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0)
def turn_left(self):
"""Make the drone rotate left."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed)
def turn_right(self):
"""Make the drone rotate right."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, self.speed)
def trim(self):
"""Flat trim the drone."""
self.at(ardrone.at.ftrim)
def set_cam(self, cam):
"""Set active camera.
Valid values are 0 for the front camera and 1 for the bottom camera
"""
self.at(ardrone.at.config, 'video:video_channel', cam)
def set_speed(self, speed):
"""Set the drone's speed.
Valid values are floats from [0..1]
"""
self.speed = speed
def at(self, cmd, *args, **kwargs):
"""Wrapper for the low level at commands.
This method takes care that the sequence number is increased after each
at command and the watchdog timer is started to make sure the drone
receives a command at least every second.
"""
with self.lock:
self.com_watchdog_timer.cancel()
cmd(self.host, self.sequence, *args, **kwargs)
self.sequence += 1
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.com_watchdog_timer.start()
def commwdg(self):
"""Communication watchdog signal.
This needs to be sent regularly to keep the communication
with the drone alive.
"""
self.at(ardrone.at.comwdg)
def halt(self):
"""Shutdown the drone.
This method does not land or halt the actual drone, but the
communication with the drone. You should call it at the end of your
application to close all sockets, pipes, processes and threads related
with this object.
"""
with self.lock:
self.com_watchdog_timer.cancel()
self.ipc_thread.stop()
self.ipc_thread.join()
self.network_process.terminate()
self.network_process.join()
def move(self, lr, fb, vv, va):
"""Makes the drone move (translate/rotate).
Parameters:
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right"""
self.at(ardrone.at.pcmd, True, lr, fb, vv, va)
|
fkmclane/python-ardrone | ardrone/drone.py | ARDrone.at | python | def at(self, cmd, *args, **kwargs):
with self.lock:
self.com_watchdog_timer.cancel()
cmd(self.host, self.sequence, *args, **kwargs)
self.sequence += 1
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.com_watchdog_timer.start() | Wrapper for the low level at commands.
This method takes care that the sequence number is increased after each
at command and the watchdog timer is started to make sure the drone
receives a command at least every second. | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L111-L123 | [
"def ref(host, seq, takeoff, emergency=False):\n \"\"\"\n Basic behaviour of the drone: take-off/landing, emergency stop/reset)\n\n Parameters:\n seq -- sequence number\n takeoff -- True: Takeoff / False: Land\n emergency -- True: Turn off the engines\n \"\"\"\n p = 0b10001010101000000000000000000\n if takeoff:\n p |= 0b1000000000\n if emergency:\n p |= 0b100000000\n at(host, 'REF', seq, [p])\n",
"def config(host, seq, option, value):\n \"\"\"Set configuration parameters of the drone.\"\"\"\n at(host, 'CONFIG', seq, [str(option), str(value)])\n",
"def pcmd(host, seq, progressive, lr, fb, vv, va):\n \"\"\"\n Makes the drone move (translate/rotate).\n\n Parameters:\n seq -- sequence number\n progressive -- True: enable progressive commands, False: disable (i.e.\n enable hovering mode)\n lr -- left-right tilt: float [-1..1] negative: left, positive: right\n rb -- front-back tilt: float [-1..1] negative: forwards, positive:\n backwards\n vv -- vertical speed: float [-1..1] negative: go down, positive: rise\n va -- angular speed: float [-1..1] negative: spin left, positive: spin\n right\n\n The above float values are a percentage of the maximum speed.\n \"\"\"\n p = 1 if progressive else 0\n at(host, 'PCMD', seq, [p, float(lr), float(fb), float(vv), float(va)])\n",
"def ftrim(host, seq):\n \"\"\"\n Tell the drone it's lying horizontally.\n\n Parameters:\n seq -- sequence number\n \"\"\"\n at(host, 'FTRIM', seq, [])\n",
"def comwdg(host, seq):\n \"\"\"\n Reset communication watchdog.\n \"\"\"\n # FIXME: no sequence number\n at(host, 'COMWDG', seq, [])\n"
] | class ARDrone(object):
"""ARDrone Class.
Instantiate this class to control your drone and receive decoded video and
navdata.
"""
def __init__(self, host='192.168.1.1'):
self.host = host
self.sequence = 1
self.timer = 0.2
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.lock = threading.Lock()
self.speed = 0.2
self.at(ardrone.at.config, 'general:navdata_demo', 'TRUE')
self.at(ardrone.at.config, 'control:altitude_max', '20000')
self.video_pipe, video_pipe_other = multiprocessing.Pipe()
self.nav_pipe, nav_pipe_other = multiprocessing.Pipe()
self.com_pipe, com_pipe_other = multiprocessing.Pipe()
self.network_process = ardrone.network.ARDroneNetworkProcess(self.host, nav_pipe_other, video_pipe_other, com_pipe_other)
self.network_process.start()
self.ipc_thread = ardrone.network.IPCThread(self)
self.ipc_thread.start()
self.image = PIL.Image.new('RGB', (640, 360))
self.navdata = dict()
self.time = 0
def takeoff(self):
"""Make the drone takeoff."""
self.at(ardrone.at.ref, True)
def land(self):
"""Make the drone land."""
self.at(ardrone.at.ref, False)
def hover(self):
"""Make the drone hover."""
self.at(ardrone.at.pcmd, False, 0, 0, 0, 0)
def move_left(self):
"""Make the drone move left."""
self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0)
def move_right(self):
"""Make the drone move right."""
self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0)
def move_up(self):
"""Make the drone rise upwards."""
self.at(ardrone.at.pcmd, True, 0, 0, self.speed, 0)
def move_down(self):
"""Make the drone decent downwards."""
self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0)
def move_forward(self):
"""Make the drone move forward."""
self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0)
def move_backward(self):
"""Make the drone move backwards."""
self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0)
def turn_left(self):
"""Make the drone rotate left."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed)
def turn_right(self):
"""Make the drone rotate right."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, self.speed)
def reset(self):
"""Toggle the drone's emergency state."""
self.at(ardrone.at.ref, False, True)
time.sleep(0.1)
self.at(ardrone.at.ref, False, False)
def trim(self):
"""Flat trim the drone."""
self.at(ardrone.at.ftrim)
def set_cam(self, cam):
"""Set active camera.
Valid values are 0 for the front camera and 1 for the bottom camera
"""
self.at(ardrone.at.config, 'video:video_channel', cam)
def set_speed(self, speed):
"""Set the drone's speed.
Valid values are floats from [0..1]
"""
self.speed = speed
def commwdg(self):
"""Communication watchdog signal.
This needs to be sent regularly to keep the communication
with the drone alive.
"""
self.at(ardrone.at.comwdg)
def halt(self):
"""Shutdown the drone.
This method does not land or halt the actual drone, but the
communication with the drone. You should call it at the end of your
application to close all sockets, pipes, processes and threads related
with this object.
"""
with self.lock:
self.com_watchdog_timer.cancel()
self.ipc_thread.stop()
self.ipc_thread.join()
self.network_process.terminate()
self.network_process.join()
def move(self, lr, fb, vv, va):
"""Makes the drone move (translate/rotate).
Parameters:
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right"""
self.at(ardrone.at.pcmd, True, lr, fb, vv, va)
|
fkmclane/python-ardrone | ardrone/drone.py | ARDrone.halt | python | def halt(self):
with self.lock:
self.com_watchdog_timer.cancel()
self.ipc_thread.stop()
self.ipc_thread.join()
self.network_process.terminate()
self.network_process.join() | Shutdown the drone.
This method does not land or halt the actual drone, but the
communication with the drone. You should call it at the end of your
application to close all sockets, pipes, processes and threads related
with this object. | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L133-L146 | [
"def stop(self):\n \"\"\"Stop the IPCThread activity.\"\"\"\n self.stopping = True\n"
] | class ARDrone(object):
"""ARDrone Class.
Instantiate this class to control your drone and receive decoded video and
navdata.
"""
def __init__(self, host='192.168.1.1'):
self.host = host
self.sequence = 1
self.timer = 0.2
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.lock = threading.Lock()
self.speed = 0.2
self.at(ardrone.at.config, 'general:navdata_demo', 'TRUE')
self.at(ardrone.at.config, 'control:altitude_max', '20000')
self.video_pipe, video_pipe_other = multiprocessing.Pipe()
self.nav_pipe, nav_pipe_other = multiprocessing.Pipe()
self.com_pipe, com_pipe_other = multiprocessing.Pipe()
self.network_process = ardrone.network.ARDroneNetworkProcess(self.host, nav_pipe_other, video_pipe_other, com_pipe_other)
self.network_process.start()
self.ipc_thread = ardrone.network.IPCThread(self)
self.ipc_thread.start()
self.image = PIL.Image.new('RGB', (640, 360))
self.navdata = dict()
self.time = 0
def takeoff(self):
"""Make the drone takeoff."""
self.at(ardrone.at.ref, True)
def land(self):
"""Make the drone land."""
self.at(ardrone.at.ref, False)
def hover(self):
"""Make the drone hover."""
self.at(ardrone.at.pcmd, False, 0, 0, 0, 0)
def move_left(self):
"""Make the drone move left."""
self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0)
def move_right(self):
"""Make the drone move right."""
self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0)
def move_up(self):
"""Make the drone rise upwards."""
self.at(ardrone.at.pcmd, True, 0, 0, self.speed, 0)
def move_down(self):
"""Make the drone decent downwards."""
self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0)
def move_forward(self):
"""Make the drone move forward."""
self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0)
def move_backward(self):
"""Make the drone move backwards."""
self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0)
def turn_left(self):
"""Make the drone rotate left."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed)
def turn_right(self):
"""Make the drone rotate right."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, self.speed)
def reset(self):
"""Toggle the drone's emergency state."""
self.at(ardrone.at.ref, False, True)
time.sleep(0.1)
self.at(ardrone.at.ref, False, False)
def trim(self):
"""Flat trim the drone."""
self.at(ardrone.at.ftrim)
def set_cam(self, cam):
"""Set active camera.
Valid values are 0 for the front camera and 1 for the bottom camera
"""
self.at(ardrone.at.config, 'video:video_channel', cam)
def set_speed(self, speed):
"""Set the drone's speed.
Valid values are floats from [0..1]
"""
self.speed = speed
def at(self, cmd, *args, **kwargs):
"""Wrapper for the low level at commands.
This method takes care that the sequence number is increased after each
at command and the watchdog timer is started to make sure the drone
receives a command at least every second.
"""
with self.lock:
self.com_watchdog_timer.cancel()
cmd(self.host, self.sequence, *args, **kwargs)
self.sequence += 1
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.com_watchdog_timer.start()
def commwdg(self):
"""Communication watchdog signal.
This needs to be sent regularly to keep the communication
with the drone alive.
"""
self.at(ardrone.at.comwdg)
def move(self, lr, fb, vv, va):
"""Makes the drone move (translate/rotate).
Parameters:
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right"""
self.at(ardrone.at.pcmd, True, lr, fb, vv, va)
|
fkmclane/python-ardrone | ardrone/drone.py | ARDrone.move | python | def move(self, lr, fb, vv, va):
self.at(ardrone.at.pcmd, True, lr, fb, vv, va) | Makes the drone move (translate/rotate).
Parameters:
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L148-L158 | [
"def at(self, cmd, *args, **kwargs):\n \"\"\"Wrapper for the low level at commands.\n\n This method takes care that the sequence number is increased after each\n at command and the watchdog timer is started to make sure the drone\n receives a command at least every second.\n \"\"\"\n with self.lock:\n self.com_watchdog_timer.cancel()\n cmd(self.host, self.sequence, *args, **kwargs)\n self.sequence += 1\n self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)\n self.com_watchdog_timer.start()\n"
] | class ARDrone(object):
"""ARDrone Class.
Instantiate this class to control your drone and receive decoded video and
navdata.
"""
def __init__(self, host='192.168.1.1'):
self.host = host
self.sequence = 1
self.timer = 0.2
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.lock = threading.Lock()
self.speed = 0.2
self.at(ardrone.at.config, 'general:navdata_demo', 'TRUE')
self.at(ardrone.at.config, 'control:altitude_max', '20000')
self.video_pipe, video_pipe_other = multiprocessing.Pipe()
self.nav_pipe, nav_pipe_other = multiprocessing.Pipe()
self.com_pipe, com_pipe_other = multiprocessing.Pipe()
self.network_process = ardrone.network.ARDroneNetworkProcess(self.host, nav_pipe_other, video_pipe_other, com_pipe_other)
self.network_process.start()
self.ipc_thread = ardrone.network.IPCThread(self)
self.ipc_thread.start()
self.image = PIL.Image.new('RGB', (640, 360))
self.navdata = dict()
self.time = 0
def takeoff(self):
"""Make the drone takeoff."""
self.at(ardrone.at.ref, True)
def land(self):
"""Make the drone land."""
self.at(ardrone.at.ref, False)
def hover(self):
"""Make the drone hover."""
self.at(ardrone.at.pcmd, False, 0, 0, 0, 0)
def move_left(self):
"""Make the drone move left."""
self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0)
def move_right(self):
"""Make the drone move right."""
self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0)
def move_up(self):
"""Make the drone rise upwards."""
self.at(ardrone.at.pcmd, True, 0, 0, self.speed, 0)
def move_down(self):
"""Make the drone decent downwards."""
self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0)
def move_forward(self):
"""Make the drone move forward."""
self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0)
def move_backward(self):
"""Make the drone move backwards."""
self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0)
def turn_left(self):
"""Make the drone rotate left."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed)
def turn_right(self):
"""Make the drone rotate right."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, self.speed)
def reset(self):
"""Toggle the drone's emergency state."""
self.at(ardrone.at.ref, False, True)
time.sleep(0.1)
self.at(ardrone.at.ref, False, False)
def trim(self):
"""Flat trim the drone."""
self.at(ardrone.at.ftrim)
def set_cam(self, cam):
"""Set active camera.
Valid values are 0 for the front camera and 1 for the bottom camera
"""
self.at(ardrone.at.config, 'video:video_channel', cam)
def set_speed(self, speed):
"""Set the drone's speed.
Valid values are floats from [0..1]
"""
self.speed = speed
def at(self, cmd, *args, **kwargs):
"""Wrapper for the low level at commands.
This method takes care that the sequence number is increased after each
at command and the watchdog timer is started to make sure the drone
receives a command at least every second.
"""
with self.lock:
self.com_watchdog_timer.cancel()
cmd(self.host, self.sequence, *args, **kwargs)
self.sequence += 1
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.com_watchdog_timer.start()
def commwdg(self):
"""Communication watchdog signal.
This needs to be sent regularly to keep the communication
with the drone alive.
"""
self.at(ardrone.at.comwdg)
def halt(self):
"""Shutdown the drone.
This method does not land or halt the actual drone, but the
communication with the drone. You should call it at the end of your
application to close all sockets, pipes, processes and threads related
with this object.
"""
with self.lock:
self.com_watchdog_timer.cancel()
self.ipc_thread.stop()
self.ipc_thread.join()
self.network_process.terminate()
self.network_process.join()
|
fkmclane/python-ardrone | ardrone/at.py | ref | python | def ref(host, seq, takeoff, emergency=False):
p = 0b10001010101000000000000000000
if takeoff:
p |= 0b1000000000
if emergency:
p |= 0b100000000
at(host, 'REF', seq, [p]) | Basic behaviour of the drone: take-off/landing, emergency stop/reset)
Parameters:
seq -- sequence number
takeoff -- True: Takeoff / False: Land
emergency -- True: Turn off the engines | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/at.py#L16-L30 | [
"def at(host, command, seq, params):\n \"\"\"\n Parameters:\n command -- the command\n seq -- the sequence number\n params -- a list of elements which can be either int, float or string\n \"\"\"\n params_str = []\n for p in params:\n if type(p) == int:\n params_str.append('{:d}'.format(p))\n elif type(p) == float:\n params_str.append('{:d}'.format(f2i(p)))\n elif type(p) == str:\n params_str.append('\"{:s}\"'.format(p))\n msg = 'AT*{:s}={:d},{:s}\\r'.format(command, seq, ','.join(params_str))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))\n"
] | import socket
import struct
import ardrone.constant
def f2i(f):
"""Interpret IEEE-754 floating-point value as signed integer.
Arguments:
f -- floating point value
"""
return struct.unpack('i', struct.pack('f', f))[0]
def pcmd(host, seq, progressive, lr, fb, vv, va):
"""
Makes the drone move (translate/rotate).
Parameters:
seq -- sequence number
progressive -- True: enable progressive commands, False: disable (i.e.
enable hovering mode)
lr -- left-right tilt: float [-1..1] negative: left, positive: right
rb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right
The above float values are a percentage of the maximum speed.
"""
p = 1 if progressive else 0
at(host, 'PCMD', seq, [p, float(lr), float(fb), float(vv), float(va)])
def ftrim(host, seq):
"""
Tell the drone it's lying horizontally.
Parameters:
seq -- sequence number
"""
at(host, 'FTRIM', seq, [])
def zap(host, seq, stream):
"""
Selects which video stream to send on the video UDP port.
Parameters:
seq -- sequence number
stream -- Integer: video stream to broadcast
"""
# FIXME: improve parameters to select the modes directly
at(host, 'ZAP', seq, [stream])
def config(host, seq, option, value):
"""Set configuration parameters of the drone."""
at(host, 'CONFIG', seq, [str(option), str(value)])
def comwdg(host, seq):
"""
Reset communication watchdog.
"""
# FIXME: no sequence number
at(host, 'COMWDG', seq, [])
def aflight(host, seq, flag):
"""
Makes the drone fly autonomously.
Parameters:
seq -- sequence number
flag -- Integer: 1: start flight, 0: stop flight
"""
at(host, 'AFLIGHT', seq, [flag])
def pwm(host, seq, m1, m2, m3, m4):
"""
Sends control values directly to the engines, overriding control loops.
Parameters:
seq -- sequence number
m1 -- Integer: front left command
m2 -- Integer: front right command
m3 -- Integer: back right command
m4 -- Integer: back left command
"""
at(host, 'PWM', seq, [m1, m2, m3, m4])
def led(host, seq, anim, f, d):
"""
Control the drones LED.
Parameters:
seq -- sequence number
anim -- Integer: animation to play
f -- Float: frequency in HZ of the animation
d -- Integer: total duration in seconds of the animation
"""
at(host, 'LED', seq, [anim, float(f), d])
def anim(host, seq, anim, d):
"""
Makes the drone execute a predefined movement (animation).
Parameters:
seq -- sequcence number
anim -- Integer: animation to play
d -- Integer: total duration in seconds of the animation
"""
at(host, 'ANIM', seq, [anim, d])
def at(host, command, seq, params):
"""
Parameters:
command -- the command
seq -- the sequence number
params -- a list of elements which can be either int, float or string
"""
params_str = []
for p in params:
if type(p) == int:
params_str.append('{:d}'.format(p))
elif type(p) == float:
params_str.append('{:d}'.format(f2i(p)))
elif type(p) == str:
params_str.append('"{:s}"'.format(p))
msg = 'AT*{:s}={:d},{:s}\r'.format(command, seq, ','.join(params_str))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))
|
fkmclane/python-ardrone | ardrone/at.py | pcmd | python | def pcmd(host, seq, progressive, lr, fb, vv, va):
p = 1 if progressive else 0
at(host, 'PCMD', seq, [p, float(lr), float(fb), float(vv), float(va)]) | Makes the drone move (translate/rotate).
Parameters:
seq -- sequence number
progressive -- True: enable progressive commands, False: disable (i.e.
enable hovering mode)
lr -- left-right tilt: float [-1..1] negative: left, positive: right
rb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right
The above float values are a percentage of the maximum speed. | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/at.py#L33-L51 | [
"def at(host, command, seq, params):\n \"\"\"\n Parameters:\n command -- the command\n seq -- the sequence number\n params -- a list of elements which can be either int, float or string\n \"\"\"\n params_str = []\n for p in params:\n if type(p) == int:\n params_str.append('{:d}'.format(p))\n elif type(p) == float:\n params_str.append('{:d}'.format(f2i(p)))\n elif type(p) == str:\n params_str.append('\"{:s}\"'.format(p))\n msg = 'AT*{:s}={:d},{:s}\\r'.format(command, seq, ','.join(params_str))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))\n"
] | import socket
import struct
import ardrone.constant
def f2i(f):
"""Interpret IEEE-754 floating-point value as signed integer.
Arguments:
f -- floating point value
"""
return struct.unpack('i', struct.pack('f', f))[0]
def ref(host, seq, takeoff, emergency=False):
"""
Basic behaviour of the drone: take-off/landing, emergency stop/reset)
Parameters:
seq -- sequence number
takeoff -- True: Takeoff / False: Land
emergency -- True: Turn off the engines
"""
p = 0b10001010101000000000000000000
if takeoff:
p |= 0b1000000000
if emergency:
p |= 0b100000000
at(host, 'REF', seq, [p])
def ftrim(host, seq):
"""
Tell the drone it's lying horizontally.
Parameters:
seq -- sequence number
"""
at(host, 'FTRIM', seq, [])
def zap(host, seq, stream):
"""
Selects which video stream to send on the video UDP port.
Parameters:
seq -- sequence number
stream -- Integer: video stream to broadcast
"""
# FIXME: improve parameters to select the modes directly
at(host, 'ZAP', seq, [stream])
def config(host, seq, option, value):
"""Set configuration parameters of the drone."""
at(host, 'CONFIG', seq, [str(option), str(value)])
def comwdg(host, seq):
"""
Reset communication watchdog.
"""
# FIXME: no sequence number
at(host, 'COMWDG', seq, [])
def aflight(host, seq, flag):
"""
Makes the drone fly autonomously.
Parameters:
seq -- sequence number
flag -- Integer: 1: start flight, 0: stop flight
"""
at(host, 'AFLIGHT', seq, [flag])
def pwm(host, seq, m1, m2, m3, m4):
"""
Sends control values directly to the engines, overriding control loops.
Parameters:
seq -- sequence number
m1 -- Integer: front left command
m2 -- Integer: front right command
m3 -- Integer: back right command
m4 -- Integer: back left command
"""
at(host, 'PWM', seq, [m1, m2, m3, m4])
def led(host, seq, anim, f, d):
"""
Control the drones LED.
Parameters:
seq -- sequence number
anim -- Integer: animation to play
f -- Float: frequency in HZ of the animation
d -- Integer: total duration in seconds of the animation
"""
at(host, 'LED', seq, [anim, float(f), d])
def anim(host, seq, anim, d):
"""
Makes the drone execute a predefined movement (animation).
Parameters:
seq -- sequcence number
anim -- Integer: animation to play
d -- Integer: total duration in seconds of the animation
"""
at(host, 'ANIM', seq, [anim, d])
def at(host, command, seq, params):
"""
Parameters:
command -- the command
seq -- the sequence number
params -- a list of elements which can be either int, float or string
"""
params_str = []
for p in params:
if type(p) == int:
params_str.append('{:d}'.format(p))
elif type(p) == float:
params_str.append('{:d}'.format(f2i(p)))
elif type(p) == str:
params_str.append('"{:s}"'.format(p))
msg = 'AT*{:s}={:d},{:s}\r'.format(command, seq, ','.join(params_str))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))
|
fkmclane/python-ardrone | ardrone/at.py | config | python | def config(host, seq, option, value):
at(host, 'CONFIG', seq, [str(option), str(value)]) | Set configuration parameters of the drone. | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/at.py#L76-L78 | [
"def at(host, command, seq, params):\n \"\"\"\n Parameters:\n command -- the command\n seq -- the sequence number\n params -- a list of elements which can be either int, float or string\n \"\"\"\n params_str = []\n for p in params:\n if type(p) == int:\n params_str.append('{:d}'.format(p))\n elif type(p) == float:\n params_str.append('{:d}'.format(f2i(p)))\n elif type(p) == str:\n params_str.append('\"{:s}\"'.format(p))\n msg = 'AT*{:s}={:d},{:s}\\r'.format(command, seq, ','.join(params_str))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))\n"
] | import socket
import struct
import ardrone.constant
def f2i(f):
"""Interpret IEEE-754 floating-point value as signed integer.
Arguments:
f -- floating point value
"""
return struct.unpack('i', struct.pack('f', f))[0]
def ref(host, seq, takeoff, emergency=False):
"""
Basic behaviour of the drone: take-off/landing, emergency stop/reset)
Parameters:
seq -- sequence number
takeoff -- True: Takeoff / False: Land
emergency -- True: Turn off the engines
"""
p = 0b10001010101000000000000000000
if takeoff:
p |= 0b1000000000
if emergency:
p |= 0b100000000
at(host, 'REF', seq, [p])
def pcmd(host, seq, progressive, lr, fb, vv, va):
"""
Makes the drone move (translate/rotate).
Parameters:
seq -- sequence number
progressive -- True: enable progressive commands, False: disable (i.e.
enable hovering mode)
lr -- left-right tilt: float [-1..1] negative: left, positive: right
rb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right
The above float values are a percentage of the maximum speed.
"""
p = 1 if progressive else 0
at(host, 'PCMD', seq, [p, float(lr), float(fb), float(vv), float(va)])
def ftrim(host, seq):
"""
Tell the drone it's lying horizontally.
Parameters:
seq -- sequence number
"""
at(host, 'FTRIM', seq, [])
def zap(host, seq, stream):
"""
Selects which video stream to send on the video UDP port.
Parameters:
seq -- sequence number
stream -- Integer: video stream to broadcast
"""
# FIXME: improve parameters to select the modes directly
at(host, 'ZAP', seq, [stream])
def comwdg(host, seq):
"""
Reset communication watchdog.
"""
# FIXME: no sequence number
at(host, 'COMWDG', seq, [])
def aflight(host, seq, flag):
"""
Makes the drone fly autonomously.
Parameters:
seq -- sequence number
flag -- Integer: 1: start flight, 0: stop flight
"""
at(host, 'AFLIGHT', seq, [flag])
def pwm(host, seq, m1, m2, m3, m4):
"""
Sends control values directly to the engines, overriding control loops.
Parameters:
seq -- sequence number
m1 -- Integer: front left command
m2 -- Integer: front right command
m3 -- Integer: back right command
m4 -- Integer: back left command
"""
at(host, 'PWM', seq, [m1, m2, m3, m4])
def led(host, seq, anim, f, d):
"""
Control the drones LED.
Parameters:
seq -- sequence number
anim -- Integer: animation to play
f -- Float: frequency in HZ of the animation
d -- Integer: total duration in seconds of the animation
"""
at(host, 'LED', seq, [anim, float(f), d])
def anim(host, seq, anim, d):
"""
Makes the drone execute a predefined movement (animation).
Parameters:
seq -- sequcence number
anim -- Integer: animation to play
d -- Integer: total duration in seconds of the animation
"""
at(host, 'ANIM', seq, [anim, d])
def at(host, command, seq, params):
"""
Parameters:
command -- the command
seq -- the sequence number
params -- a list of elements which can be either int, float or string
"""
params_str = []
for p in params:
if type(p) == int:
params_str.append('{:d}'.format(p))
elif type(p) == float:
params_str.append('{:d}'.format(f2i(p)))
elif type(p) == str:
params_str.append('"{:s}"'.format(p))
msg = 'AT*{:s}={:d},{:s}\r'.format(command, seq, ','.join(params_str))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))
|
fkmclane/python-ardrone | ardrone/at.py | pwm | python | def pwm(host, seq, m1, m2, m3, m4):
at(host, 'PWM', seq, [m1, m2, m3, m4]) | Sends control values directly to the engines, overriding control loops.
Parameters:
seq -- sequence number
m1 -- Integer: front left command
m2 -- Integer: front right command
m3 -- Integer: back right command
m4 -- Integer: back left command | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/at.py#L100-L111 | [
"def at(host, command, seq, params):\n \"\"\"\n Parameters:\n command -- the command\n seq -- the sequence number\n params -- a list of elements which can be either int, float or string\n \"\"\"\n params_str = []\n for p in params:\n if type(p) == int:\n params_str.append('{:d}'.format(p))\n elif type(p) == float:\n params_str.append('{:d}'.format(f2i(p)))\n elif type(p) == str:\n params_str.append('\"{:s}\"'.format(p))\n msg = 'AT*{:s}={:d},{:s}\\r'.format(command, seq, ','.join(params_str))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))\n"
] | import socket
import struct
import ardrone.constant
def f2i(f):
"""Interpret IEEE-754 floating-point value as signed integer.
Arguments:
f -- floating point value
"""
return struct.unpack('i', struct.pack('f', f))[0]
def ref(host, seq, takeoff, emergency=False):
"""
Basic behaviour of the drone: take-off/landing, emergency stop/reset)
Parameters:
seq -- sequence number
takeoff -- True: Takeoff / False: Land
emergency -- True: Turn off the engines
"""
p = 0b10001010101000000000000000000
if takeoff:
p |= 0b1000000000
if emergency:
p |= 0b100000000
at(host, 'REF', seq, [p])
def pcmd(host, seq, progressive, lr, fb, vv, va):
"""
Makes the drone move (translate/rotate).
Parameters:
seq -- sequence number
progressive -- True: enable progressive commands, False: disable (i.e.
enable hovering mode)
lr -- left-right tilt: float [-1..1] negative: left, positive: right
rb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right
The above float values are a percentage of the maximum speed.
"""
p = 1 if progressive else 0
at(host, 'PCMD', seq, [p, float(lr), float(fb), float(vv), float(va)])
def ftrim(host, seq):
"""
Tell the drone it's lying horizontally.
Parameters:
seq -- sequence number
"""
at(host, 'FTRIM', seq, [])
def zap(host, seq, stream):
"""
Selects which video stream to send on the video UDP port.
Parameters:
seq -- sequence number
stream -- Integer: video stream to broadcast
"""
# FIXME: improve parameters to select the modes directly
at(host, 'ZAP', seq, [stream])
def config(host, seq, option, value):
"""Set configuration parameters of the drone."""
at(host, 'CONFIG', seq, [str(option), str(value)])
def comwdg(host, seq):
"""
Reset communication watchdog.
"""
# FIXME: no sequence number
at(host, 'COMWDG', seq, [])
def aflight(host, seq, flag):
"""
Makes the drone fly autonomously.
Parameters:
seq -- sequence number
flag -- Integer: 1: start flight, 0: stop flight
"""
at(host, 'AFLIGHT', seq, [flag])
def led(host, seq, anim, f, d):
"""
Control the drones LED.
Parameters:
seq -- sequence number
anim -- Integer: animation to play
f -- Float: frequency in HZ of the animation
d -- Integer: total duration in seconds of the animation
"""
at(host, 'LED', seq, [anim, float(f), d])
def anim(host, seq, anim, d):
"""
Makes the drone execute a predefined movement (animation).
Parameters:
seq -- sequcence number
anim -- Integer: animation to play
d -- Integer: total duration in seconds of the animation
"""
at(host, 'ANIM', seq, [anim, d])
def at(host, command, seq, params):
"""
Parameters:
command -- the command
seq -- the sequence number
params -- a list of elements which can be either int, float or string
"""
params_str = []
for p in params:
if type(p) == int:
params_str.append('{:d}'.format(p))
elif type(p) == float:
params_str.append('{:d}'.format(f2i(p)))
elif type(p) == str:
params_str.append('"{:s}"'.format(p))
msg = 'AT*{:s}={:d},{:s}\r'.format(command, seq, ','.join(params_str))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))
|
fkmclane/python-ardrone | ardrone/at.py | led | python | def led(host, seq, anim, f, d):
at(host, 'LED', seq, [anim, float(f), d]) | Control the drones LED.
Parameters:
seq -- sequence number
anim -- Integer: animation to play
f -- Float: frequency in HZ of the animation
d -- Integer: total duration in seconds of the animation | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/at.py#L114-L124 | [
"def at(host, command, seq, params):\n \"\"\"\n Parameters:\n command -- the command\n seq -- the sequence number\n params -- a list of elements which can be either int, float or string\n \"\"\"\n params_str = []\n for p in params:\n if type(p) == int:\n params_str.append('{:d}'.format(p))\n elif type(p) == float:\n params_str.append('{:d}'.format(f2i(p)))\n elif type(p) == str:\n params_str.append('\"{:s}\"'.format(p))\n msg = 'AT*{:s}={:d},{:s}\\r'.format(command, seq, ','.join(params_str))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))\n"
] | import socket
import struct
import ardrone.constant
def f2i(f):
"""Interpret IEEE-754 floating-point value as signed integer.
Arguments:
f -- floating point value
"""
return struct.unpack('i', struct.pack('f', f))[0]
def ref(host, seq, takeoff, emergency=False):
"""
Basic behaviour of the drone: take-off/landing, emergency stop/reset)
Parameters:
seq -- sequence number
takeoff -- True: Takeoff / False: Land
emergency -- True: Turn off the engines
"""
p = 0b10001010101000000000000000000
if takeoff:
p |= 0b1000000000
if emergency:
p |= 0b100000000
at(host, 'REF', seq, [p])
def pcmd(host, seq, progressive, lr, fb, vv, va):
"""
Makes the drone move (translate/rotate).
Parameters:
seq -- sequence number
progressive -- True: enable progressive commands, False: disable (i.e.
enable hovering mode)
lr -- left-right tilt: float [-1..1] negative: left, positive: right
rb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right
The above float values are a percentage of the maximum speed.
"""
p = 1 if progressive else 0
at(host, 'PCMD', seq, [p, float(lr), float(fb), float(vv), float(va)])
def ftrim(host, seq):
"""
Tell the drone it's lying horizontally.
Parameters:
seq -- sequence number
"""
at(host, 'FTRIM', seq, [])
def zap(host, seq, stream):
"""
Selects which video stream to send on the video UDP port.
Parameters:
seq -- sequence number
stream -- Integer: video stream to broadcast
"""
# FIXME: improve parameters to select the modes directly
at(host, 'ZAP', seq, [stream])
def config(host, seq, option, value):
"""Set configuration parameters of the drone."""
at(host, 'CONFIG', seq, [str(option), str(value)])
def comwdg(host, seq):
"""
Reset communication watchdog.
"""
# FIXME: no sequence number
at(host, 'COMWDG', seq, [])
def aflight(host, seq, flag):
"""
Makes the drone fly autonomously.
Parameters:
seq -- sequence number
flag -- Integer: 1: start flight, 0: stop flight
"""
at(host, 'AFLIGHT', seq, [flag])
def pwm(host, seq, m1, m2, m3, m4):
"""
Sends control values directly to the engines, overriding control loops.
Parameters:
seq -- sequence number
m1 -- Integer: front left command
m2 -- Integer: front right command
m3 -- Integer: back right command
m4 -- Integer: back left command
"""
at(host, 'PWM', seq, [m1, m2, m3, m4])
def anim(host, seq, anim, d):
"""
Makes the drone execute a predefined movement (animation).
Parameters:
seq -- sequcence number
anim -- Integer: animation to play
d -- Integer: total duration in seconds of the animation
"""
at(host, 'ANIM', seq, [anim, d])
def at(host, command, seq, params):
"""
Parameters:
command -- the command
seq -- the sequence number
params -- a list of elements which can be either int, float or string
"""
params_str = []
for p in params:
if type(p) == int:
params_str.append('{:d}'.format(p))
elif type(p) == float:
params_str.append('{:d}'.format(f2i(p)))
elif type(p) == str:
params_str.append('"{:s}"'.format(p))
msg = 'AT*{:s}={:d},{:s}\r'.format(command, seq, ','.join(params_str))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))
|
fkmclane/python-ardrone | ardrone/at.py | anim | python | def anim(host, seq, anim, d):
at(host, 'ANIM', seq, [anim, d]) | Makes the drone execute a predefined movement (animation).
Parameters:
seq -- sequcence number
anim -- Integer: animation to play
d -- Integer: total duration in seconds of the animation | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/at.py#L127-L136 | [
"def at(host, command, seq, params):\n \"\"\"\n Parameters:\n command -- the command\n seq -- the sequence number\n params -- a list of elements which can be either int, float or string\n \"\"\"\n params_str = []\n for p in params:\n if type(p) == int:\n params_str.append('{:d}'.format(p))\n elif type(p) == float:\n params_str.append('{:d}'.format(f2i(p)))\n elif type(p) == str:\n params_str.append('\"{:s}\"'.format(p))\n msg = 'AT*{:s}={:d},{:s}\\r'.format(command, seq, ','.join(params_str))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))\n"
] | import socket
import struct
import ardrone.constant
def f2i(f):
"""Interpret IEEE-754 floating-point value as signed integer.
Arguments:
f -- floating point value
"""
return struct.unpack('i', struct.pack('f', f))[0]
def ref(host, seq, takeoff, emergency=False):
"""
Basic behaviour of the drone: take-off/landing, emergency stop/reset)
Parameters:
seq -- sequence number
takeoff -- True: Takeoff / False: Land
emergency -- True: Turn off the engines
"""
p = 0b10001010101000000000000000000
if takeoff:
p |= 0b1000000000
if emergency:
p |= 0b100000000
at(host, 'REF', seq, [p])
def pcmd(host, seq, progressive, lr, fb, vv, va):
"""
Makes the drone move (translate/rotate).
Parameters:
seq -- sequence number
progressive -- True: enable progressive commands, False: disable (i.e.
enable hovering mode)
lr -- left-right tilt: float [-1..1] negative: left, positive: right
rb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right
The above float values are a percentage of the maximum speed.
"""
p = 1 if progressive else 0
at(host, 'PCMD', seq, [p, float(lr), float(fb), float(vv), float(va)])
def ftrim(host, seq):
"""
Tell the drone it's lying horizontally.
Parameters:
seq -- sequence number
"""
at(host, 'FTRIM', seq, [])
def zap(host, seq, stream):
"""
Selects which video stream to send on the video UDP port.
Parameters:
seq -- sequence number
stream -- Integer: video stream to broadcast
"""
# FIXME: improve parameters to select the modes directly
at(host, 'ZAP', seq, [stream])
def config(host, seq, option, value):
"""Set configuration parameters of the drone."""
at(host, 'CONFIG', seq, [str(option), str(value)])
def comwdg(host, seq):
"""
Reset communication watchdog.
"""
# FIXME: no sequence number
at(host, 'COMWDG', seq, [])
def aflight(host, seq, flag):
"""
Makes the drone fly autonomously.
Parameters:
seq -- sequence number
flag -- Integer: 1: start flight, 0: stop flight
"""
at(host, 'AFLIGHT', seq, [flag])
def pwm(host, seq, m1, m2, m3, m4):
"""
Sends control values directly to the engines, overriding control loops.
Parameters:
seq -- sequence number
m1 -- Integer: front left command
m2 -- Integer: front right command
m3 -- Integer: back right command
m4 -- Integer: back left command
"""
at(host, 'PWM', seq, [m1, m2, m3, m4])
def led(host, seq, anim, f, d):
"""
Control the drones LED.
Parameters:
seq -- sequence number
anim -- Integer: animation to play
f -- Float: frequency in HZ of the animation
d -- Integer: total duration in seconds of the animation
"""
at(host, 'LED', seq, [anim, float(f), d])
def at(host, command, seq, params):
"""
Parameters:
command -- the command
seq -- the sequence number
params -- a list of elements which can be either int, float or string
"""
params_str = []
for p in params:
if type(p) == int:
params_str.append('{:d}'.format(p))
elif type(p) == float:
params_str.append('{:d}'.format(f2i(p)))
elif type(p) == str:
params_str.append('"{:s}"'.format(p))
msg = 'AT*{:s}={:d},{:s}\r'.format(command, seq, ','.join(params_str))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))
|
fkmclane/python-ardrone | ardrone/at.py | at | python | def at(host, command, seq, params):
params_str = []
for p in params:
if type(p) == int:
params_str.append('{:d}'.format(p))
elif type(p) == float:
params_str.append('{:d}'.format(f2i(p)))
elif type(p) == str:
params_str.append('"{:s}"'.format(p))
msg = 'AT*{:s}={:d},{:s}\r'.format(command, seq, ','.join(params_str))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT)) | Parameters:
command -- the command
seq -- the sequence number
params -- a list of elements which can be either int, float or string | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/at.py#L138-L155 | [
"def f2i(f):\n \"\"\"Interpret IEEE-754 floating-point value as signed integer.\n\n Arguments:\n f -- floating point value\n \"\"\"\n return struct.unpack('i', struct.pack('f', f))[0]\n"
] | import socket
import struct
import ardrone.constant
def f2i(f):
"""Interpret IEEE-754 floating-point value as signed integer.
Arguments:
f -- floating point value
"""
return struct.unpack('i', struct.pack('f', f))[0]
def ref(host, seq, takeoff, emergency=False):
"""
Basic behaviour of the drone: take-off/landing, emergency stop/reset)
Parameters:
seq -- sequence number
takeoff -- True: Takeoff / False: Land
emergency -- True: Turn off the engines
"""
p = 0b10001010101000000000000000000
if takeoff:
p |= 0b1000000000
if emergency:
p |= 0b100000000
at(host, 'REF', seq, [p])
def pcmd(host, seq, progressive, lr, fb, vv, va):
"""
Makes the drone move (translate/rotate).
Parameters:
seq -- sequence number
progressive -- True: enable progressive commands, False: disable (i.e.
enable hovering mode)
lr -- left-right tilt: float [-1..1] negative: left, positive: right
rb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right
The above float values are a percentage of the maximum speed.
"""
p = 1 if progressive else 0
at(host, 'PCMD', seq, [p, float(lr), float(fb), float(vv), float(va)])
def ftrim(host, seq):
"""
Tell the drone it's lying horizontally.
Parameters:
seq -- sequence number
"""
at(host, 'FTRIM', seq, [])
def zap(host, seq, stream):
"""
Selects which video stream to send on the video UDP port.
Parameters:
seq -- sequence number
stream -- Integer: video stream to broadcast
"""
# FIXME: improve parameters to select the modes directly
at(host, 'ZAP', seq, [stream])
def config(host, seq, option, value):
"""Set configuration parameters of the drone."""
at(host, 'CONFIG', seq, [str(option), str(value)])
def comwdg(host, seq):
"""
Reset communication watchdog.
"""
# FIXME: no sequence number
at(host, 'COMWDG', seq, [])
def aflight(host, seq, flag):
"""
Makes the drone fly autonomously.
Parameters:
seq -- sequence number
flag -- Integer: 1: start flight, 0: stop flight
"""
at(host, 'AFLIGHT', seq, [flag])
def pwm(host, seq, m1, m2, m3, m4):
"""
Sends control values directly to the engines, overriding control loops.
Parameters:
seq -- sequence number
m1 -- Integer: front left command
m2 -- Integer: front right command
m3 -- Integer: back right command
m4 -- Integer: back left command
"""
at(host, 'PWM', seq, [m1, m2, m3, m4])
def led(host, seq, anim, f, d):
"""
Control the drones LED.
Parameters:
seq -- sequence number
anim -- Integer: animation to play
f -- Float: frequency in HZ of the animation
d -- Integer: total duration in seconds of the animation
"""
at(host, 'LED', seq, [anim, float(f), d])
def anim(host, seq, anim, d):
"""
Makes the drone execute a predefined movement (animation).
Parameters:
seq -- sequcence number
anim -- Integer: animation to play
d -- Integer: total duration in seconds of the animation
"""
at(host, 'ANIM', seq, [anim, d])
|
fkmclane/python-ardrone | ardrone/navdata.py | decode | python | def decode(packet):
offset = 0
_ = struct.unpack_from('IIII', packet, offset)
s = _[1]
state = dict()
state['fly'] = s & 1 # FLY MASK : (0) ardrone is landed, (1) ardrone is flying
state['video'] = s >> 1 & 1 # VIDEO MASK : (0) video disable, (1) video enable
state['vision'] = s >> 2 & 1 # VISION MASK : (0) vision disable, (1) vision enable
state['control'] = s >> 3 & 1 # CONTROL ALGO (0) euler angles control, (1) angular speed control
state['altitude'] = s >> 4 & 1 # ALTITUDE CONTROL ALGO : (0) altitude control inactive (1) altitude control active
state['user_feedback_start'] = s >> 5 & 1 # USER feedback : Start button state
state['command'] = s >> 6 & 1 # Control command ACK : (0) None, (1) one received
state['fw_file'] = s >> 7 & 1 # Firmware file is good (1)
state['fw_ver'] = s >> 8 & 1 # Firmware update is newer (1)
state['fw_upd'] = s >> 9 & 1 # Firmware update is ongoing (1)
state['navdata_demo'] = s >> 10 & 1 # Navdata demo : (0) All navdata, (1) only navdata demo
state['navdata_bootstrap'] = s >> 11 & 1 # Navdata bootstrap : (0) options sent in all or demo mode, (1) no navdata options sent
state['motors'] = s >> 12 & 1 # Motor status : (0) Ok, (1) Motors problem
state['com_lost'] = s >> 13 & 1 # Communication lost : (1) com problem, (0) Com is ok
state['vbat_low'] = s >> 15 & 1 # VBat low : (1) too low, (0) Ok
state['user_el'] = s >> 16 & 1 # User Emergency Landing : (1) User EL is ON, (0) User EL is OFF
state['timer_elapsed'] = s >> 17 & 1 # Timer elapsed : (1) elapsed, (0) not elapsed
state['angles_out_of_range'] = s >> 19 & 1 # Angles : (0) Ok, (1) out of range
state['ultrasound'] = s >> 21 & 1 # Ultrasonic sensor : (0) Ok, (1) deaf
state['cutout'] = s >> 22 & 1 # Cutout system detection : (0) Not detected, (1) detected
state['pic_version'] = s >> 23 & 1 # PIC Version number OK : (0) a bad version number, (1) version number is OK
state['atcodec_thread_on'] = s >> 24 & 1 # ATCodec thread ON : (0) thread OFF (1) thread ON
state['navdata_thread_on'] = s >> 25 & 1 # Navdata thread ON : (0) thread OFF (1) thread ON
state['video_thread_on'] = s >> 26 & 1 # Video thread ON : (0) thread OFF (1) thread ON
state['acq_thread_on'] = s >> 27 & 1 # Acquisition thread ON : (0) thread OFF (1) thread ON
state['ctrl_watchdog'] = s >> 28 & 1 # CTRL watchdog : (1) delay in control execution (> 5ms), (0) control is well scheduled
state['adc_watchdog'] = s >> 29 & 1 # ADC Watchdog : (1) delay in uart2 dsr (> 5ms), (0) uart2 is good
state['com_watchdog'] = s >> 30 & 1 # Communication Watchdog : (1) com problem, (0) Com is ok
state['emergency'] = s >> 31 & 1 # Emergency landing : (0) no emergency, (1) emergency
data = dict()
data['state'] = state
data['header'] = _[0]
data['sequence'] = _[2]
data['vision'] = _[3]
offset += struct.calcsize('IIII')
demo_fields = [
'ctrl_state',
'battery',
'theta',
'phi',
'psi',
'altitude',
'vx',
'vy',
'vz',
'num_frames'
]
angles = ['theta', 'phi', 'psi']
while True:
try:
id_nr, size = struct.unpack_from('HH', packet, offset)
offset += struct.calcsize('HH')
except struct.error:
break
values = []
for i in range(size - struct.calcsize('HH')):
values.append(struct.unpack_from('c', packet, offset)[0])
offset += struct.calcsize('c')
if id_nr == 0:
values = struct.unpack_from('IIfffIfffI', b''.join(values))
demo = dict(zip(demo_fields, values))
for a in angles:
demo[a] = int(demo[a] / 1000)
data['demo'] = demo
return data | Decode a navdata packet. | train | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/navdata.py#L4-L82 | null | import struct
|
federico123579/Trading212-API | tradingAPI/low_level.py | LowLevelAPI.launch | python | def launch(self):
try:
# init virtual Display
self.vbro = Display()
self.vbro.start()
logger.debug("virtual display launched")
except Exception:
raise exceptions.VBroException()
try:
self.browser = Browser(self.brow_name)
logger.debug(f"browser {self.brow_name} launched")
except Exception:
raise exceptions.BrowserException(
self.brow_name, "failed to launch")
return True | launch browser and virtual display, first of all to be launched | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/low_level.py#L73-L88 | null | class LowLevelAPI(object):
"""low level api to interface with the service"""
def __init__(self, brow="firefox"):
self.brow_name = brow
self.positions = []
self.movements = []
self.stocks = []
# init globals
Glob()
def css(self, css_path, dom=None):
"""css find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_css, args=[css_path])
def css1(self, css_path, dom=None):
"""return the first value of self.css"""
if dom is None:
dom = self.browser
def _css1(path, domm):
"""virtual local func"""
return self.css(path, domm)[0]
return expect(_css1, args=[css_path, dom])
def search_name(self, name, dom=None):
"""name find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_name, args=[name])
def xpath(self, xpath, dom=None):
"""xpath find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_xpath, args=[xpath])
def elCss(self, css_path, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_css, args=[css_path])
def elXpath(self, xpath, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_xpath, args=[xpath])
def login(self, username, password, mode="demo"):
"""login function"""
url = "https://trading212.com/it/login"
try:
logger.debug(f"visiting %s" % url)
self.browser.visit(url)
logger.debug(f"connected to %s" % url)
except selenium.common.exceptions.WebDriverException:
logger.critical("connection timed out")
raise
try:
self.search_name("login[username]").fill(username)
self.search_name("login[password]").fill(password)
self.css1(path['log']).click()
# define a timeout for logging in
timeout = time.time() + 30
while not self.elCss(path['logo']):
if time.time() > timeout:
logger.critical("login failed")
raise CredentialsException(username)
time.sleep(1)
logger.info(f"logged in as {username}")
# check if it's a weekend
if mode == "demo" and datetime.now().isoweekday() in range(5, 8):
timeout = time.time() + 10
while not self.elCss(path['alert-box']):
if time.time() > timeout:
logger.warning("weekend trading alert-box not closed")
break
if self.elCss(path['alert-box']):
self.css1(path['alert-box']).click()
logger.debug("weekend trading alert-box closed")
except Exception as e:
logger.critical("login failed")
raise exceptions.BaseExc(e)
return True
def logout(self):
"""logout func (quit browser)"""
try:
self.browser.quit()
except Exception:
raise exceptions.BrowserException(self.brow_name, "not started")
return False
self.vbro.stop()
logger.info("logged out")
return True
def get_bottom_info(self, info):
accepted_values = {
'free_funds': 'equity-free',
'account_value': 'equity-total',
'live_result': 'equity-ppl',
'used_margin': 'equity-margin'}
try:
info_label = accepted_values[info]
val = self.css1("div#%s span.equity-item-value" % info_label).text
return num(val)
except KeyError as e:
raise exceptions.BaseExc(e)
def get_price(self, name):
soup = BeautifulSoup(
self.css1("div.scrollable-area-content").html, "html.parser")
for product in soup.select("div.tradebox"):
fullname = product.select("span.instrument-name")[0].text.lower()
if name.lower() in fullname:
mark_closed_list = [x for x in product.select(
"div.quantity-list-input-wrapper") if x.select(
"div.placeholder")[0].text.lower().find("close") != -1]
if mark_closed_list:
sell_price = product.select("div.tradebox-price-sell")[0]\
.text
return float(sell_price)
else:
return False
class MovementWindow(object):
"""add movement window"""
def __init__(self, api, product):
self.api = api
self.product = product
self.state = 'initialized'
self.insfu = False
def open(self, name_counter=None):
"""open the window"""
if self.api.css1(path['add-mov']).visible:
self.api.css1(path['add-mov']).click()
else:
self.api.css1('span.dataTable-no-data-action').click()
logger.debug("opened window")
self.api.css1(path['search-box']).fill(self.product)
if self.get_result(0) is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(self.product)
result, product = self.search_res(self.product, name_counter)
result.click()
if self.api.elCss("div.widget_message"):
self.decode(self.api.css1("div.widget_message"))
self.product = product
self.state = 'open'
def _check_open(self):
if self.state == 'open':
return True
else:
raise exceptions.WindowException()
def close(self):
"""close a movement"""
self._check_open()
self.api.css1(path['close']).click()
self.state = 'closed'
logger.debug("closed window")
def confirm(self):
"""confirm the movement"""
self._check_open()
self.get_price()
self.api.css1(path['confirm-btn']).click()
widg = self.api.css("div.widget_message")
if widg:
self.decode(widg[0])
raise exceptions.WidgetException(widg)
if all(x for x in ['quantity', 'mode'] if hasattr(self, x)):
self.api.movements.append(Movement(
self.product, self.quantity, self.mode, self.price))
logger.debug("%s movement appended to the list" % self.product)
self.state = 'conclused'
logger.debug("confirmed movement")
def search_res(self, res, check_counter=None):
"""search for a res"""
logger.debug("searching result")
result = self.get_result(0)
name = self.get_research_name(result)
x = 0
while not self.check_name(res, name, counter=check_counter):
name = self.get_research_name(self.get_result(x))
if name is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(res)
logger.debug(name)
if self.check_name(res, name, counter=check_counter):
return self.get_result(x)
x += 1
logger.debug("found product at position %d" % (x + 1))
return result, name
def check_name(self, name, string, counter=None):
"""if both in string return False"""
name = name.lower()
string = string.lower()
if counter is None:
if name in string:
return True
else:
return False
counter = counter.lower()
if name in string and counter in string:
logger.debug("check_name: counter found in string")
return False
elif name in string and counter not in string:
return True
else:
return False
def get_research_name(self, res):
"""return result name"""
if res is None:
return None
return self.api.css1("span.instrument-name", res).text
def get_result(self, pos):
"""get pos result, where 0 is first"""
evalxpath = path['res'] + f"[{pos + 1}]"
try:
res = self.api.xpath(evalxpath)[0]
return res
except Exception:
return None
def set_limit(self, category, mode, value):
"""set limit in movement window"""
self._check_open()
if (mode not in ["unit", "value"] or category
not in ["gain", "loss", "both"]):
raise ValueError()
if not hasattr(self, 'stop_limit'):
self.stop_limit = {'gain': {}, 'loss': {}}
logger.debug("initialized stop_limit")
if category == 'gain':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
elif category == 'loss':
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
if category != 'both':
self.stop_limit[category]['mode'] = mode
self.stop_limit[category]['value'] = value
elif category == 'both':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
for cat in ['gain', 'loss']:
self.stop_limit[cat]['mode'] = mode
self.stop_limit[cat]['value'] = value
logger.debug("set limit")
def decode(self, message):
"""decode text pop-up"""
title = self.api.css1("div.title", message).text
text = self.api.css1("div.text", message).text
if title == "Insufficient Funds":
self.insfu = True
elif title == "Maximum Quantity Limit":
raise exceptions.MaxQuantLimit(num(text))
elif title == "Minimum Quantity Limit":
raise exceptions.MinQuantLimit(num(text))
logger.debug("decoded message")
def decode_update(self, message, value, mult=0.1):
"""decode and update the value"""
try:
msg_text = self.api.css1("div.text", message).text
return num(msg_text)
except Exception:
if msg_text.lower().find("higher") != -1:
value += value * mult
return value
else:
self.decode(message)
return None
def get_mov_margin(self):
"""get the margin of the movement"""
self._check_open()
return num(self.api.css1("span.cfd-order-info-item-value").text)
def set_mode(self, mode):
"""set mode (buy or sell)"""
self._check_open()
if mode not in ["buy", "sell"]:
raise ValueError()
self.api.css1(path[mode + '-btn']).click()
self.mode = mode
logger.debug("mode set")
def get_quantity(self):
"""gte current quantity"""
self._check_open()
quant = int(num(self.api.css1(path['quantity']).value))
self.quantity = quant
return quant
def set_quantity(self, quant):
"""set quantity"""
self._check_open()
self.api.css1(path['quantity']).fill(str(int(quant)))
self.quantity = quant
logger.debug("quantity set")
def get_price(self, mode='buy'):
"""get current price"""
if mode not in ['buy', 'sell']:
raise ValueError()
self._check_open()
price = num(self.api.css1(
"div.orderdialog div.tradebox-price-%s" % mode).text)
self.price = price
return price
def get_unit_value(self):
"""get unit value of stock based on margin, memoized"""
# find in the collection
try:
unit_value = Glob().theCollector.collection['unit_value']
unit_value_res = unit_value[self.product]
logger.debug("unit_value found in the collection")
return unit_value_res
except KeyError:
logger.debug("unit_value not found in the collection")
pip = get_pip(mov=self)
quant = 1 / pip
if hasattr(self, 'quantity'):
old_quant == self.quantity
self.set_quantity(quant)
# update the site
time.sleep(0.5)
margin = self.get_mov_margin()
logger.debug(f"quant: {quant} - pip: {pip} - margin: {margin}")
if 'old_quant' in locals():
self.set_quantity(old_quant)
unit_val = margin / quant
self.unit_value = unit_val
Glob().unit_valueHandler.add_val({self.product: unit_val})
return unit_val
def new_mov(self, name):
"""factory method pattern"""
return self.MovementWindow(self, name)
class Position(PurePosition):
"""position object"""
def __init__(self, api, html_div):
"""initialized from div"""
self.api = api
if isinstance(html_div, type('')):
self.soup_data = BeautifulSoup(html_div, 'html.parser')
else:
self.soup_data = html_div
self.product = self.soup_data.select("td.name")[0].text
self.quantity = num(self.soup_data.select("td.quantity")[0].text)
if ("direction-label-buy" in
self.soup_data.select("td.direction")[0].span['class']):
self.mode = 'buy'
else:
self.mode = 'sell'
self.price = num(self.soup_data.select("td.averagePrice")[0].text)
self.margin = num(self.soup_data.select("td.margin")[0].text)
self.id = self.find_id()
def update(self, soup):
"""update the soup"""
self.soup_data = soup
return soup
def find_id(self):
"""find pos ID with with given data"""
pos_id = self.soup_data['id']
self.id = pos_id
return pos_id
@property
def close_tag(self):
"""obtain close tag"""
return f"#{self.id} div.close-icon"
def close(self):
"""close position via tag"""
self.api.css1(self.close_tag).click()
try:
self.api.xpath(path['ok_but'])[0].click()
except selenium.common.exceptions.ElementNotInteractableException:
if (self.api.css1('.widget_message div.title').text ==
'Market Closed'):
logger.error("market closed, position can't be closed")
raise exceptions.MarketClosed()
raise exceptions.WidgetException(
self.api.css1('.widget_message div.text').text)
# wait until it's been closed
# set a timeout
timeout = time.time() + 10
while self.api.elCss(self.close_tag):
time.sleep(0.1)
if time.time() > timeout:
raise TimeoutError("failed to close pos %s" % self.id)
logger.debug("closed pos %s" % self.id)
def get_gain(self):
"""get current profit"""
gain = num(self.soup_data.select("td.ppl")[0].text)
self.gain = gain
return gain
def bind_mov(self):
"""bind the corresponding movement"""
logger = logging.getLogger("tradingAPI.low_level.bind_mov")
mov_list = [x for x in self.api.movements
if x.product == self.product and
x.quantity == self.quantity and
x.mode == self.mode]
if not mov_list:
logger.debug("fail: mov not found")
return None
else:
logger.debug("success: found movement")
for x in mov_list:
# find approximate price
max_roof = self.price + self.price * 0.01
min_roof = self.price - self.price * 0.01
if min_roof < x.price < max_roof:
logger.debug("success: price corresponding")
# bind mov
self.mov = x
return x
else:
logger.debug("fail: price %f not corresponding to %f" %
(self.price, x.price))
continue
# if nothing, return None
return None
def new_pos(self, html_div):
"""factory method pattern"""
pos = self.Position(self, html_div)
pos.bind_mov()
self.positions.append(pos)
return pos
|
federico123579/Trading212-API | tradingAPI/low_level.py | LowLevelAPI.css | python | def css(self, css_path, dom=None):
if dom is None:
dom = self.browser
return expect(dom.find_by_css, args=[css_path]) | css find function abbreviation | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/low_level.py#L90-L94 | [
"def expect(func, args, times=7, sleep_t=0.5):\n \"\"\"try many times as in times with sleep time\"\"\"\n while times > 0:\n try:\n return func(*args)\n except Exception as e:\n times -= 1\n logger.debug(\"expect failed - attempts left: %d\" % times)\n time.sleep(sleep_t)\n if times == 0:\n raise exceptions.BaseExc(e)\n"
] | class LowLevelAPI(object):
"""low level api to interface with the service"""
def __init__(self, brow="firefox"):
self.brow_name = brow
self.positions = []
self.movements = []
self.stocks = []
# init globals
Glob()
def launch(self):
"""launch browser and virtual display, first of all to be launched"""
try:
# init virtual Display
self.vbro = Display()
self.vbro.start()
logger.debug("virtual display launched")
except Exception:
raise exceptions.VBroException()
try:
self.browser = Browser(self.brow_name)
logger.debug(f"browser {self.brow_name} launched")
except Exception:
raise exceptions.BrowserException(
self.brow_name, "failed to launch")
return True
def css1(self, css_path, dom=None):
"""return the first value of self.css"""
if dom is None:
dom = self.browser
def _css1(path, domm):
"""virtual local func"""
return self.css(path, domm)[0]
return expect(_css1, args=[css_path, dom])
def search_name(self, name, dom=None):
"""name find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_name, args=[name])
def xpath(self, xpath, dom=None):
"""xpath find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_xpath, args=[xpath])
def elCss(self, css_path, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_css, args=[css_path])
def elXpath(self, xpath, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_xpath, args=[xpath])
def login(self, username, password, mode="demo"):
"""login function"""
url = "https://trading212.com/it/login"
try:
logger.debug(f"visiting %s" % url)
self.browser.visit(url)
logger.debug(f"connected to %s" % url)
except selenium.common.exceptions.WebDriverException:
logger.critical("connection timed out")
raise
try:
self.search_name("login[username]").fill(username)
self.search_name("login[password]").fill(password)
self.css1(path['log']).click()
# define a timeout for logging in
timeout = time.time() + 30
while not self.elCss(path['logo']):
if time.time() > timeout:
logger.critical("login failed")
raise CredentialsException(username)
time.sleep(1)
logger.info(f"logged in as {username}")
# check if it's a weekend
if mode == "demo" and datetime.now().isoweekday() in range(5, 8):
timeout = time.time() + 10
while not self.elCss(path['alert-box']):
if time.time() > timeout:
logger.warning("weekend trading alert-box not closed")
break
if self.elCss(path['alert-box']):
self.css1(path['alert-box']).click()
logger.debug("weekend trading alert-box closed")
except Exception as e:
logger.critical("login failed")
raise exceptions.BaseExc(e)
return True
def logout(self):
"""logout func (quit browser)"""
try:
self.browser.quit()
except Exception:
raise exceptions.BrowserException(self.brow_name, "not started")
return False
self.vbro.stop()
logger.info("logged out")
return True
def get_bottom_info(self, info):
accepted_values = {
'free_funds': 'equity-free',
'account_value': 'equity-total',
'live_result': 'equity-ppl',
'used_margin': 'equity-margin'}
try:
info_label = accepted_values[info]
val = self.css1("div#%s span.equity-item-value" % info_label).text
return num(val)
except KeyError as e:
raise exceptions.BaseExc(e)
def get_price(self, name):
soup = BeautifulSoup(
self.css1("div.scrollable-area-content").html, "html.parser")
for product in soup.select("div.tradebox"):
fullname = product.select("span.instrument-name")[0].text.lower()
if name.lower() in fullname:
mark_closed_list = [x for x in product.select(
"div.quantity-list-input-wrapper") if x.select(
"div.placeholder")[0].text.lower().find("close") != -1]
if mark_closed_list:
sell_price = product.select("div.tradebox-price-sell")[0]\
.text
return float(sell_price)
else:
return False
class MovementWindow(object):
"""add movement window"""
def __init__(self, api, product):
self.api = api
self.product = product
self.state = 'initialized'
self.insfu = False
def open(self, name_counter=None):
"""open the window"""
if self.api.css1(path['add-mov']).visible:
self.api.css1(path['add-mov']).click()
else:
self.api.css1('span.dataTable-no-data-action').click()
logger.debug("opened window")
self.api.css1(path['search-box']).fill(self.product)
if self.get_result(0) is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(self.product)
result, product = self.search_res(self.product, name_counter)
result.click()
if self.api.elCss("div.widget_message"):
self.decode(self.api.css1("div.widget_message"))
self.product = product
self.state = 'open'
def _check_open(self):
if self.state == 'open':
return True
else:
raise exceptions.WindowException()
def close(self):
"""close a movement"""
self._check_open()
self.api.css1(path['close']).click()
self.state = 'closed'
logger.debug("closed window")
def confirm(self):
"""confirm the movement"""
self._check_open()
self.get_price()
self.api.css1(path['confirm-btn']).click()
widg = self.api.css("div.widget_message")
if widg:
self.decode(widg[0])
raise exceptions.WidgetException(widg)
if all(x for x in ['quantity', 'mode'] if hasattr(self, x)):
self.api.movements.append(Movement(
self.product, self.quantity, self.mode, self.price))
logger.debug("%s movement appended to the list" % self.product)
self.state = 'conclused'
logger.debug("confirmed movement")
def search_res(self, res, check_counter=None):
"""search for a res"""
logger.debug("searching result")
result = self.get_result(0)
name = self.get_research_name(result)
x = 0
while not self.check_name(res, name, counter=check_counter):
name = self.get_research_name(self.get_result(x))
if name is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(res)
logger.debug(name)
if self.check_name(res, name, counter=check_counter):
return self.get_result(x)
x += 1
logger.debug("found product at position %d" % (x + 1))
return result, name
def check_name(self, name, string, counter=None):
"""if both in string return False"""
name = name.lower()
string = string.lower()
if counter is None:
if name in string:
return True
else:
return False
counter = counter.lower()
if name in string and counter in string:
logger.debug("check_name: counter found in string")
return False
elif name in string and counter not in string:
return True
else:
return False
def get_research_name(self, res):
"""return result name"""
if res is None:
return None
return self.api.css1("span.instrument-name", res).text
def get_result(self, pos):
"""get pos result, where 0 is first"""
evalxpath = path['res'] + f"[{pos + 1}]"
try:
res = self.api.xpath(evalxpath)[0]
return res
except Exception:
return None
def set_limit(self, category, mode, value):
"""set limit in movement window"""
self._check_open()
if (mode not in ["unit", "value"] or category
not in ["gain", "loss", "both"]):
raise ValueError()
if not hasattr(self, 'stop_limit'):
self.stop_limit = {'gain': {}, 'loss': {}}
logger.debug("initialized stop_limit")
if category == 'gain':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
elif category == 'loss':
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
if category != 'both':
self.stop_limit[category]['mode'] = mode
self.stop_limit[category]['value'] = value
elif category == 'both':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
for cat in ['gain', 'loss']:
self.stop_limit[cat]['mode'] = mode
self.stop_limit[cat]['value'] = value
logger.debug("set limit")
def decode(self, message):
"""decode text pop-up"""
title = self.api.css1("div.title", message).text
text = self.api.css1("div.text", message).text
if title == "Insufficient Funds":
self.insfu = True
elif title == "Maximum Quantity Limit":
raise exceptions.MaxQuantLimit(num(text))
elif title == "Minimum Quantity Limit":
raise exceptions.MinQuantLimit(num(text))
logger.debug("decoded message")
def decode_update(self, message, value, mult=0.1):
"""decode and update the value"""
try:
msg_text = self.api.css1("div.text", message).text
return num(msg_text)
except Exception:
if msg_text.lower().find("higher") != -1:
value += value * mult
return value
else:
self.decode(message)
return None
def get_mov_margin(self):
"""get the margin of the movement"""
self._check_open()
return num(self.api.css1("span.cfd-order-info-item-value").text)
def set_mode(self, mode):
"""set mode (buy or sell)"""
self._check_open()
if mode not in ["buy", "sell"]:
raise ValueError()
self.api.css1(path[mode + '-btn']).click()
self.mode = mode
logger.debug("mode set")
def get_quantity(self):
"""gte current quantity"""
self._check_open()
quant = int(num(self.api.css1(path['quantity']).value))
self.quantity = quant
return quant
def set_quantity(self, quant):
"""set quantity"""
self._check_open()
self.api.css1(path['quantity']).fill(str(int(quant)))
self.quantity = quant
logger.debug("quantity set")
def get_price(self, mode='buy'):
"""get current price"""
if mode not in ['buy', 'sell']:
raise ValueError()
self._check_open()
price = num(self.api.css1(
"div.orderdialog div.tradebox-price-%s" % mode).text)
self.price = price
return price
def get_unit_value(self):
"""get unit value of stock based on margin, memoized"""
# find in the collection
try:
unit_value = Glob().theCollector.collection['unit_value']
unit_value_res = unit_value[self.product]
logger.debug("unit_value found in the collection")
return unit_value_res
except KeyError:
logger.debug("unit_value not found in the collection")
pip = get_pip(mov=self)
quant = 1 / pip
if hasattr(self, 'quantity'):
old_quant == self.quantity
self.set_quantity(quant)
# update the site
time.sleep(0.5)
margin = self.get_mov_margin()
logger.debug(f"quant: {quant} - pip: {pip} - margin: {margin}")
if 'old_quant' in locals():
self.set_quantity(old_quant)
unit_val = margin / quant
self.unit_value = unit_val
Glob().unit_valueHandler.add_val({self.product: unit_val})
return unit_val
def new_mov(self, name):
"""factory method pattern"""
return self.MovementWindow(self, name)
class Position(PurePosition):
"""position object"""
def __init__(self, api, html_div):
"""initialized from div"""
self.api = api
if isinstance(html_div, type('')):
self.soup_data = BeautifulSoup(html_div, 'html.parser')
else:
self.soup_data = html_div
self.product = self.soup_data.select("td.name")[0].text
self.quantity = num(self.soup_data.select("td.quantity")[0].text)
if ("direction-label-buy" in
self.soup_data.select("td.direction")[0].span['class']):
self.mode = 'buy'
else:
self.mode = 'sell'
self.price = num(self.soup_data.select("td.averagePrice")[0].text)
self.margin = num(self.soup_data.select("td.margin")[0].text)
self.id = self.find_id()
def update(self, soup):
"""update the soup"""
self.soup_data = soup
return soup
def find_id(self):
"""find pos ID with with given data"""
pos_id = self.soup_data['id']
self.id = pos_id
return pos_id
@property
def close_tag(self):
"""obtain close tag"""
return f"#{self.id} div.close-icon"
def close(self):
"""close position via tag"""
self.api.css1(self.close_tag).click()
try:
self.api.xpath(path['ok_but'])[0].click()
except selenium.common.exceptions.ElementNotInteractableException:
if (self.api.css1('.widget_message div.title').text ==
'Market Closed'):
logger.error("market closed, position can't be closed")
raise exceptions.MarketClosed()
raise exceptions.WidgetException(
self.api.css1('.widget_message div.text').text)
# wait until it's been closed
# set a timeout
timeout = time.time() + 10
while self.api.elCss(self.close_tag):
time.sleep(0.1)
if time.time() > timeout:
raise TimeoutError("failed to close pos %s" % self.id)
logger.debug("closed pos %s" % self.id)
def get_gain(self):
"""get current profit"""
gain = num(self.soup_data.select("td.ppl")[0].text)
self.gain = gain
return gain
def bind_mov(self):
"""bind the corresponding movement"""
logger = logging.getLogger("tradingAPI.low_level.bind_mov")
mov_list = [x for x in self.api.movements
if x.product == self.product and
x.quantity == self.quantity and
x.mode == self.mode]
if not mov_list:
logger.debug("fail: mov not found")
return None
else:
logger.debug("success: found movement")
for x in mov_list:
# find approximate price
max_roof = self.price + self.price * 0.01
min_roof = self.price - self.price * 0.01
if min_roof < x.price < max_roof:
logger.debug("success: price corresponding")
# bind mov
self.mov = x
return x
else:
logger.debug("fail: price %f not corresponding to %f" %
(self.price, x.price))
continue
# if nothing, return None
return None
def new_pos(self, html_div):
"""factory method pattern"""
pos = self.Position(self, html_div)
pos.bind_mov()
self.positions.append(pos)
return pos
|
federico123579/Trading212-API | tradingAPI/low_level.py | LowLevelAPI.css1 | python | def css1(self, css_path, dom=None):
if dom is None:
dom = self.browser
def _css1(path, domm):
"""virtual local func"""
return self.css(path, domm)[0]
return expect(_css1, args=[css_path, dom]) | return the first value of self.css | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/low_level.py#L96-L105 | [
"def expect(func, args, times=7, sleep_t=0.5):\n \"\"\"try many times as in times with sleep time\"\"\"\n while times > 0:\n try:\n return func(*args)\n except Exception as e:\n times -= 1\n logger.debug(\"expect failed - attempts left: %d\" % times)\n time.sleep(sleep_t)\n if times == 0:\n raise exceptions.BaseExc(e)\n"
] | class LowLevelAPI(object):
"""low level api to interface with the service"""
def __init__(self, brow="firefox"):
self.brow_name = brow
self.positions = []
self.movements = []
self.stocks = []
# init globals
Glob()
def launch(self):
"""launch browser and virtual display, first of all to be launched"""
try:
# init virtual Display
self.vbro = Display()
self.vbro.start()
logger.debug("virtual display launched")
except Exception:
raise exceptions.VBroException()
try:
self.browser = Browser(self.brow_name)
logger.debug(f"browser {self.brow_name} launched")
except Exception:
raise exceptions.BrowserException(
self.brow_name, "failed to launch")
return True
def css(self, css_path, dom=None):
"""css find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_css, args=[css_path])
def search_name(self, name, dom=None):
"""name find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_name, args=[name])
def xpath(self, xpath, dom=None):
"""xpath find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_xpath, args=[xpath])
def elCss(self, css_path, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_css, args=[css_path])
def elXpath(self, xpath, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_xpath, args=[xpath])
def login(self, username, password, mode="demo"):
"""login function"""
url = "https://trading212.com/it/login"
try:
logger.debug(f"visiting %s" % url)
self.browser.visit(url)
logger.debug(f"connected to %s" % url)
except selenium.common.exceptions.WebDriverException:
logger.critical("connection timed out")
raise
try:
self.search_name("login[username]").fill(username)
self.search_name("login[password]").fill(password)
self.css1(path['log']).click()
# define a timeout for logging in
timeout = time.time() + 30
while not self.elCss(path['logo']):
if time.time() > timeout:
logger.critical("login failed")
raise CredentialsException(username)
time.sleep(1)
logger.info(f"logged in as {username}")
# check if it's a weekend
if mode == "demo" and datetime.now().isoweekday() in range(5, 8):
timeout = time.time() + 10
while not self.elCss(path['alert-box']):
if time.time() > timeout:
logger.warning("weekend trading alert-box not closed")
break
if self.elCss(path['alert-box']):
self.css1(path['alert-box']).click()
logger.debug("weekend trading alert-box closed")
except Exception as e:
logger.critical("login failed")
raise exceptions.BaseExc(e)
return True
def logout(self):
"""logout func (quit browser)"""
try:
self.browser.quit()
except Exception:
raise exceptions.BrowserException(self.brow_name, "not started")
return False
self.vbro.stop()
logger.info("logged out")
return True
def get_bottom_info(self, info):
accepted_values = {
'free_funds': 'equity-free',
'account_value': 'equity-total',
'live_result': 'equity-ppl',
'used_margin': 'equity-margin'}
try:
info_label = accepted_values[info]
val = self.css1("div#%s span.equity-item-value" % info_label).text
return num(val)
except KeyError as e:
raise exceptions.BaseExc(e)
def get_price(self, name):
soup = BeautifulSoup(
self.css1("div.scrollable-area-content").html, "html.parser")
for product in soup.select("div.tradebox"):
fullname = product.select("span.instrument-name")[0].text.lower()
if name.lower() in fullname:
mark_closed_list = [x for x in product.select(
"div.quantity-list-input-wrapper") if x.select(
"div.placeholder")[0].text.lower().find("close") != -1]
if mark_closed_list:
sell_price = product.select("div.tradebox-price-sell")[0]\
.text
return float(sell_price)
else:
return False
class MovementWindow(object):
"""add movement window"""
def __init__(self, api, product):
self.api = api
self.product = product
self.state = 'initialized'
self.insfu = False
def open(self, name_counter=None):
"""open the window"""
if self.api.css1(path['add-mov']).visible:
self.api.css1(path['add-mov']).click()
else:
self.api.css1('span.dataTable-no-data-action').click()
logger.debug("opened window")
self.api.css1(path['search-box']).fill(self.product)
if self.get_result(0) is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(self.product)
result, product = self.search_res(self.product, name_counter)
result.click()
if self.api.elCss("div.widget_message"):
self.decode(self.api.css1("div.widget_message"))
self.product = product
self.state = 'open'
def _check_open(self):
if self.state == 'open':
return True
else:
raise exceptions.WindowException()
def close(self):
"""close a movement"""
self._check_open()
self.api.css1(path['close']).click()
self.state = 'closed'
logger.debug("closed window")
def confirm(self):
"""confirm the movement"""
self._check_open()
self.get_price()
self.api.css1(path['confirm-btn']).click()
widg = self.api.css("div.widget_message")
if widg:
self.decode(widg[0])
raise exceptions.WidgetException(widg)
if all(x for x in ['quantity', 'mode'] if hasattr(self, x)):
self.api.movements.append(Movement(
self.product, self.quantity, self.mode, self.price))
logger.debug("%s movement appended to the list" % self.product)
self.state = 'conclused'
logger.debug("confirmed movement")
def search_res(self, res, check_counter=None):
"""search for a res"""
logger.debug("searching result")
result = self.get_result(0)
name = self.get_research_name(result)
x = 0
while not self.check_name(res, name, counter=check_counter):
name = self.get_research_name(self.get_result(x))
if name is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(res)
logger.debug(name)
if self.check_name(res, name, counter=check_counter):
return self.get_result(x)
x += 1
logger.debug("found product at position %d" % (x + 1))
return result, name
def check_name(self, name, string, counter=None):
"""if both in string return False"""
name = name.lower()
string = string.lower()
if counter is None:
if name in string:
return True
else:
return False
counter = counter.lower()
if name in string and counter in string:
logger.debug("check_name: counter found in string")
return False
elif name in string and counter not in string:
return True
else:
return False
def get_research_name(self, res):
"""return result name"""
if res is None:
return None
return self.api.css1("span.instrument-name", res).text
def get_result(self, pos):
"""get pos result, where 0 is first"""
evalxpath = path['res'] + f"[{pos + 1}]"
try:
res = self.api.xpath(evalxpath)[0]
return res
except Exception:
return None
def set_limit(self, category, mode, value):
"""set limit in movement window"""
self._check_open()
if (mode not in ["unit", "value"] or category
not in ["gain", "loss", "both"]):
raise ValueError()
if not hasattr(self, 'stop_limit'):
self.stop_limit = {'gain': {}, 'loss': {}}
logger.debug("initialized stop_limit")
if category == 'gain':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
elif category == 'loss':
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
if category != 'both':
self.stop_limit[category]['mode'] = mode
self.stop_limit[category]['value'] = value
elif category == 'both':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
for cat in ['gain', 'loss']:
self.stop_limit[cat]['mode'] = mode
self.stop_limit[cat]['value'] = value
logger.debug("set limit")
def decode(self, message):
"""decode text pop-up"""
title = self.api.css1("div.title", message).text
text = self.api.css1("div.text", message).text
if title == "Insufficient Funds":
self.insfu = True
elif title == "Maximum Quantity Limit":
raise exceptions.MaxQuantLimit(num(text))
elif title == "Minimum Quantity Limit":
raise exceptions.MinQuantLimit(num(text))
logger.debug("decoded message")
def decode_update(self, message, value, mult=0.1):
"""decode and update the value"""
try:
msg_text = self.api.css1("div.text", message).text
return num(msg_text)
except Exception:
if msg_text.lower().find("higher") != -1:
value += value * mult
return value
else:
self.decode(message)
return None
def get_mov_margin(self):
"""get the margin of the movement"""
self._check_open()
return num(self.api.css1("span.cfd-order-info-item-value").text)
def set_mode(self, mode):
"""set mode (buy or sell)"""
self._check_open()
if mode not in ["buy", "sell"]:
raise ValueError()
self.api.css1(path[mode + '-btn']).click()
self.mode = mode
logger.debug("mode set")
def get_quantity(self):
"""gte current quantity"""
self._check_open()
quant = int(num(self.api.css1(path['quantity']).value))
self.quantity = quant
return quant
def set_quantity(self, quant):
"""set quantity"""
self._check_open()
self.api.css1(path['quantity']).fill(str(int(quant)))
self.quantity = quant
logger.debug("quantity set")
def get_price(self, mode='buy'):
"""get current price"""
if mode not in ['buy', 'sell']:
raise ValueError()
self._check_open()
price = num(self.api.css1(
"div.orderdialog div.tradebox-price-%s" % mode).text)
self.price = price
return price
def get_unit_value(self):
"""get unit value of stock based on margin, memoized"""
# find in the collection
try:
unit_value = Glob().theCollector.collection['unit_value']
unit_value_res = unit_value[self.product]
logger.debug("unit_value found in the collection")
return unit_value_res
except KeyError:
logger.debug("unit_value not found in the collection")
pip = get_pip(mov=self)
quant = 1 / pip
if hasattr(self, 'quantity'):
old_quant == self.quantity
self.set_quantity(quant)
# update the site
time.sleep(0.5)
margin = self.get_mov_margin()
logger.debug(f"quant: {quant} - pip: {pip} - margin: {margin}")
if 'old_quant' in locals():
self.set_quantity(old_quant)
unit_val = margin / quant
self.unit_value = unit_val
Glob().unit_valueHandler.add_val({self.product: unit_val})
return unit_val
def new_mov(self, name):
"""factory method pattern"""
return self.MovementWindow(self, name)
class Position(PurePosition):
"""position object"""
def __init__(self, api, html_div):
"""initialized from div"""
self.api = api
if isinstance(html_div, type('')):
self.soup_data = BeautifulSoup(html_div, 'html.parser')
else:
self.soup_data = html_div
self.product = self.soup_data.select("td.name")[0].text
self.quantity = num(self.soup_data.select("td.quantity")[0].text)
if ("direction-label-buy" in
self.soup_data.select("td.direction")[0].span['class']):
self.mode = 'buy'
else:
self.mode = 'sell'
self.price = num(self.soup_data.select("td.averagePrice")[0].text)
self.margin = num(self.soup_data.select("td.margin")[0].text)
self.id = self.find_id()
def update(self, soup):
"""update the soup"""
self.soup_data = soup
return soup
def find_id(self):
"""find pos ID with with given data"""
pos_id = self.soup_data['id']
self.id = pos_id
return pos_id
@property
def close_tag(self):
"""obtain close tag"""
return f"#{self.id} div.close-icon"
def close(self):
"""close position via tag"""
self.api.css1(self.close_tag).click()
try:
self.api.xpath(path['ok_but'])[0].click()
except selenium.common.exceptions.ElementNotInteractableException:
if (self.api.css1('.widget_message div.title').text ==
'Market Closed'):
logger.error("market closed, position can't be closed")
raise exceptions.MarketClosed()
raise exceptions.WidgetException(
self.api.css1('.widget_message div.text').text)
# wait until it's been closed
# set a timeout
timeout = time.time() + 10
while self.api.elCss(self.close_tag):
time.sleep(0.1)
if time.time() > timeout:
raise TimeoutError("failed to close pos %s" % self.id)
logger.debug("closed pos %s" % self.id)
def get_gain(self):
"""get current profit"""
gain = num(self.soup_data.select("td.ppl")[0].text)
self.gain = gain
return gain
def bind_mov(self):
"""bind the corresponding movement"""
logger = logging.getLogger("tradingAPI.low_level.bind_mov")
mov_list = [x for x in self.api.movements
if x.product == self.product and
x.quantity == self.quantity and
x.mode == self.mode]
if not mov_list:
logger.debug("fail: mov not found")
return None
else:
logger.debug("success: found movement")
for x in mov_list:
# find approximate price
max_roof = self.price + self.price * 0.01
min_roof = self.price - self.price * 0.01
if min_roof < x.price < max_roof:
logger.debug("success: price corresponding")
# bind mov
self.mov = x
return x
else:
logger.debug("fail: price %f not corresponding to %f" %
(self.price, x.price))
continue
# if nothing, return None
return None
def new_pos(self, html_div):
"""factory method pattern"""
pos = self.Position(self, html_div)
pos.bind_mov()
self.positions.append(pos)
return pos
|
federico123579/Trading212-API | tradingAPI/low_level.py | LowLevelAPI.search_name | python | def search_name(self, name, dom=None):
if dom is None:
dom = self.browser
return expect(dom.find_by_name, args=[name]) | name find function abbreviation | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/low_level.py#L107-L111 | [
"def expect(func, args, times=7, sleep_t=0.5):\n \"\"\"try many times as in times with sleep time\"\"\"\n while times > 0:\n try:\n return func(*args)\n except Exception as e:\n times -= 1\n logger.debug(\"expect failed - attempts left: %d\" % times)\n time.sleep(sleep_t)\n if times == 0:\n raise exceptions.BaseExc(e)\n"
] | class LowLevelAPI(object):
"""low level api to interface with the service"""
def __init__(self, brow="firefox"):
self.brow_name = brow
self.positions = []
self.movements = []
self.stocks = []
# init globals
Glob()
def launch(self):
"""launch browser and virtual display, first of all to be launched"""
try:
# init virtual Display
self.vbro = Display()
self.vbro.start()
logger.debug("virtual display launched")
except Exception:
raise exceptions.VBroException()
try:
self.browser = Browser(self.brow_name)
logger.debug(f"browser {self.brow_name} launched")
except Exception:
raise exceptions.BrowserException(
self.brow_name, "failed to launch")
return True
def css(self, css_path, dom=None):
"""css find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_css, args=[css_path])
def css1(self, css_path, dom=None):
"""return the first value of self.css"""
if dom is None:
dom = self.browser
def _css1(path, domm):
"""virtual local func"""
return self.css(path, domm)[0]
return expect(_css1, args=[css_path, dom])
def xpath(self, xpath, dom=None):
"""xpath find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_xpath, args=[xpath])
def elCss(self, css_path, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_css, args=[css_path])
def elXpath(self, xpath, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_xpath, args=[xpath])
def login(self, username, password, mode="demo"):
"""login function"""
url = "https://trading212.com/it/login"
try:
logger.debug(f"visiting %s" % url)
self.browser.visit(url)
logger.debug(f"connected to %s" % url)
except selenium.common.exceptions.WebDriverException:
logger.critical("connection timed out")
raise
try:
self.search_name("login[username]").fill(username)
self.search_name("login[password]").fill(password)
self.css1(path['log']).click()
# define a timeout for logging in
timeout = time.time() + 30
while not self.elCss(path['logo']):
if time.time() > timeout:
logger.critical("login failed")
raise CredentialsException(username)
time.sleep(1)
logger.info(f"logged in as {username}")
# check if it's a weekend
if mode == "demo" and datetime.now().isoweekday() in range(5, 8):
timeout = time.time() + 10
while not self.elCss(path['alert-box']):
if time.time() > timeout:
logger.warning("weekend trading alert-box not closed")
break
if self.elCss(path['alert-box']):
self.css1(path['alert-box']).click()
logger.debug("weekend trading alert-box closed")
except Exception as e:
logger.critical("login failed")
raise exceptions.BaseExc(e)
return True
def logout(self):
"""logout func (quit browser)"""
try:
self.browser.quit()
except Exception:
raise exceptions.BrowserException(self.brow_name, "not started")
return False
self.vbro.stop()
logger.info("logged out")
return True
def get_bottom_info(self, info):
accepted_values = {
'free_funds': 'equity-free',
'account_value': 'equity-total',
'live_result': 'equity-ppl',
'used_margin': 'equity-margin'}
try:
info_label = accepted_values[info]
val = self.css1("div#%s span.equity-item-value" % info_label).text
return num(val)
except KeyError as e:
raise exceptions.BaseExc(e)
def get_price(self, name):
soup = BeautifulSoup(
self.css1("div.scrollable-area-content").html, "html.parser")
for product in soup.select("div.tradebox"):
fullname = product.select("span.instrument-name")[0].text.lower()
if name.lower() in fullname:
mark_closed_list = [x for x in product.select(
"div.quantity-list-input-wrapper") if x.select(
"div.placeholder")[0].text.lower().find("close") != -1]
if mark_closed_list:
sell_price = product.select("div.tradebox-price-sell")[0]\
.text
return float(sell_price)
else:
return False
class MovementWindow(object):
"""add movement window"""
def __init__(self, api, product):
self.api = api
self.product = product
self.state = 'initialized'
self.insfu = False
def open(self, name_counter=None):
"""open the window"""
if self.api.css1(path['add-mov']).visible:
self.api.css1(path['add-mov']).click()
else:
self.api.css1('span.dataTable-no-data-action').click()
logger.debug("opened window")
self.api.css1(path['search-box']).fill(self.product)
if self.get_result(0) is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(self.product)
result, product = self.search_res(self.product, name_counter)
result.click()
if self.api.elCss("div.widget_message"):
self.decode(self.api.css1("div.widget_message"))
self.product = product
self.state = 'open'
def _check_open(self):
if self.state == 'open':
return True
else:
raise exceptions.WindowException()
def close(self):
"""close a movement"""
self._check_open()
self.api.css1(path['close']).click()
self.state = 'closed'
logger.debug("closed window")
def confirm(self):
"""confirm the movement"""
self._check_open()
self.get_price()
self.api.css1(path['confirm-btn']).click()
widg = self.api.css("div.widget_message")
if widg:
self.decode(widg[0])
raise exceptions.WidgetException(widg)
if all(x for x in ['quantity', 'mode'] if hasattr(self, x)):
self.api.movements.append(Movement(
self.product, self.quantity, self.mode, self.price))
logger.debug("%s movement appended to the list" % self.product)
self.state = 'conclused'
logger.debug("confirmed movement")
def search_res(self, res, check_counter=None):
"""search for a res"""
logger.debug("searching result")
result = self.get_result(0)
name = self.get_research_name(result)
x = 0
while not self.check_name(res, name, counter=check_counter):
name = self.get_research_name(self.get_result(x))
if name is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(res)
logger.debug(name)
if self.check_name(res, name, counter=check_counter):
return self.get_result(x)
x += 1
logger.debug("found product at position %d" % (x + 1))
return result, name
def check_name(self, name, string, counter=None):
"""if both in string return False"""
name = name.lower()
string = string.lower()
if counter is None:
if name in string:
return True
else:
return False
counter = counter.lower()
if name in string and counter in string:
logger.debug("check_name: counter found in string")
return False
elif name in string and counter not in string:
return True
else:
return False
def get_research_name(self, res):
"""return result name"""
if res is None:
return None
return self.api.css1("span.instrument-name", res).text
def get_result(self, pos):
"""get pos result, where 0 is first"""
evalxpath = path['res'] + f"[{pos + 1}]"
try:
res = self.api.xpath(evalxpath)[0]
return res
except Exception:
return None
def set_limit(self, category, mode, value):
"""set limit in movement window"""
self._check_open()
if (mode not in ["unit", "value"] or category
not in ["gain", "loss", "both"]):
raise ValueError()
if not hasattr(self, 'stop_limit'):
self.stop_limit = {'gain': {}, 'loss': {}}
logger.debug("initialized stop_limit")
if category == 'gain':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
elif category == 'loss':
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
if category != 'both':
self.stop_limit[category]['mode'] = mode
self.stop_limit[category]['value'] = value
elif category == 'both':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
for cat in ['gain', 'loss']:
self.stop_limit[cat]['mode'] = mode
self.stop_limit[cat]['value'] = value
logger.debug("set limit")
def decode(self, message):
"""decode text pop-up"""
title = self.api.css1("div.title", message).text
text = self.api.css1("div.text", message).text
if title == "Insufficient Funds":
self.insfu = True
elif title == "Maximum Quantity Limit":
raise exceptions.MaxQuantLimit(num(text))
elif title == "Minimum Quantity Limit":
raise exceptions.MinQuantLimit(num(text))
logger.debug("decoded message")
def decode_update(self, message, value, mult=0.1):
"""decode and update the value"""
try:
msg_text = self.api.css1("div.text", message).text
return num(msg_text)
except Exception:
if msg_text.lower().find("higher") != -1:
value += value * mult
return value
else:
self.decode(message)
return None
def get_mov_margin(self):
"""get the margin of the movement"""
self._check_open()
return num(self.api.css1("span.cfd-order-info-item-value").text)
def set_mode(self, mode):
"""set mode (buy or sell)"""
self._check_open()
if mode not in ["buy", "sell"]:
raise ValueError()
self.api.css1(path[mode + '-btn']).click()
self.mode = mode
logger.debug("mode set")
def get_quantity(self):
"""gte current quantity"""
self._check_open()
quant = int(num(self.api.css1(path['quantity']).value))
self.quantity = quant
return quant
def set_quantity(self, quant):
"""set quantity"""
self._check_open()
self.api.css1(path['quantity']).fill(str(int(quant)))
self.quantity = quant
logger.debug("quantity set")
def get_price(self, mode='buy'):
"""get current price"""
if mode not in ['buy', 'sell']:
raise ValueError()
self._check_open()
price = num(self.api.css1(
"div.orderdialog div.tradebox-price-%s" % mode).text)
self.price = price
return price
def get_unit_value(self):
"""get unit value of stock based on margin, memoized"""
# find in the collection
try:
unit_value = Glob().theCollector.collection['unit_value']
unit_value_res = unit_value[self.product]
logger.debug("unit_value found in the collection")
return unit_value_res
except KeyError:
logger.debug("unit_value not found in the collection")
pip = get_pip(mov=self)
quant = 1 / pip
if hasattr(self, 'quantity'):
old_quant == self.quantity
self.set_quantity(quant)
# update the site
time.sleep(0.5)
margin = self.get_mov_margin()
logger.debug(f"quant: {quant} - pip: {pip} - margin: {margin}")
if 'old_quant' in locals():
self.set_quantity(old_quant)
unit_val = margin / quant
self.unit_value = unit_val
Glob().unit_valueHandler.add_val({self.product: unit_val})
return unit_val
def new_mov(self, name):
"""factory method pattern"""
return self.MovementWindow(self, name)
class Position(PurePosition):
"""position object"""
def __init__(self, api, html_div):
"""initialized from div"""
self.api = api
if isinstance(html_div, type('')):
self.soup_data = BeautifulSoup(html_div, 'html.parser')
else:
self.soup_data = html_div
self.product = self.soup_data.select("td.name")[0].text
self.quantity = num(self.soup_data.select("td.quantity")[0].text)
if ("direction-label-buy" in
self.soup_data.select("td.direction")[0].span['class']):
self.mode = 'buy'
else:
self.mode = 'sell'
self.price = num(self.soup_data.select("td.averagePrice")[0].text)
self.margin = num(self.soup_data.select("td.margin")[0].text)
self.id = self.find_id()
def update(self, soup):
"""update the soup"""
self.soup_data = soup
return soup
def find_id(self):
"""find pos ID with with given data"""
pos_id = self.soup_data['id']
self.id = pos_id
return pos_id
@property
def close_tag(self):
"""obtain close tag"""
return f"#{self.id} div.close-icon"
def close(self):
"""close position via tag"""
self.api.css1(self.close_tag).click()
try:
self.api.xpath(path['ok_but'])[0].click()
except selenium.common.exceptions.ElementNotInteractableException:
if (self.api.css1('.widget_message div.title').text ==
'Market Closed'):
logger.error("market closed, position can't be closed")
raise exceptions.MarketClosed()
raise exceptions.WidgetException(
self.api.css1('.widget_message div.text').text)
# wait until it's been closed
# set a timeout
timeout = time.time() + 10
while self.api.elCss(self.close_tag):
time.sleep(0.1)
if time.time() > timeout:
raise TimeoutError("failed to close pos %s" % self.id)
logger.debug("closed pos %s" % self.id)
def get_gain(self):
"""get current profit"""
gain = num(self.soup_data.select("td.ppl")[0].text)
self.gain = gain
return gain
def bind_mov(self):
"""bind the corresponding movement"""
logger = logging.getLogger("tradingAPI.low_level.bind_mov")
mov_list = [x for x in self.api.movements
if x.product == self.product and
x.quantity == self.quantity and
x.mode == self.mode]
if not mov_list:
logger.debug("fail: mov not found")
return None
else:
logger.debug("success: found movement")
for x in mov_list:
# find approximate price
max_roof = self.price + self.price * 0.01
min_roof = self.price - self.price * 0.01
if min_roof < x.price < max_roof:
logger.debug("success: price corresponding")
# bind mov
self.mov = x
return x
else:
logger.debug("fail: price %f not corresponding to %f" %
(self.price, x.price))
continue
# if nothing, return None
return None
def new_pos(self, html_div):
"""factory method pattern"""
pos = self.Position(self, html_div)
pos.bind_mov()
self.positions.append(pos)
return pos
|
federico123579/Trading212-API | tradingAPI/low_level.py | LowLevelAPI.xpath | python | def xpath(self, xpath, dom=None):
if dom is None:
dom = self.browser
return expect(dom.find_by_xpath, args=[xpath]) | xpath find function abbreviation | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/low_level.py#L113-L117 | [
"def expect(func, args, times=7, sleep_t=0.5):\n \"\"\"try many times as in times with sleep time\"\"\"\n while times > 0:\n try:\n return func(*args)\n except Exception as e:\n times -= 1\n logger.debug(\"expect failed - attempts left: %d\" % times)\n time.sleep(sleep_t)\n if times == 0:\n raise exceptions.BaseExc(e)\n"
] | class LowLevelAPI(object):
"""low level api to interface with the service"""
def __init__(self, brow="firefox"):
self.brow_name = brow
self.positions = []
self.movements = []
self.stocks = []
# init globals
Glob()
def launch(self):
"""launch browser and virtual display, first of all to be launched"""
try:
# init virtual Display
self.vbro = Display()
self.vbro.start()
logger.debug("virtual display launched")
except Exception:
raise exceptions.VBroException()
try:
self.browser = Browser(self.brow_name)
logger.debug(f"browser {self.brow_name} launched")
except Exception:
raise exceptions.BrowserException(
self.brow_name, "failed to launch")
return True
def css(self, css_path, dom=None):
"""css find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_css, args=[css_path])
def css1(self, css_path, dom=None):
"""return the first value of self.css"""
if dom is None:
dom = self.browser
def _css1(path, domm):
"""virtual local func"""
return self.css(path, domm)[0]
return expect(_css1, args=[css_path, dom])
def search_name(self, name, dom=None):
"""name find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_name, args=[name])
def elCss(self, css_path, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_css, args=[css_path])
def elXpath(self, xpath, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_xpath, args=[xpath])
def login(self, username, password, mode="demo"):
"""login function"""
url = "https://trading212.com/it/login"
try:
logger.debug(f"visiting %s" % url)
self.browser.visit(url)
logger.debug(f"connected to %s" % url)
except selenium.common.exceptions.WebDriverException:
logger.critical("connection timed out")
raise
try:
self.search_name("login[username]").fill(username)
self.search_name("login[password]").fill(password)
self.css1(path['log']).click()
# define a timeout for logging in
timeout = time.time() + 30
while not self.elCss(path['logo']):
if time.time() > timeout:
logger.critical("login failed")
raise CredentialsException(username)
time.sleep(1)
logger.info(f"logged in as {username}")
# check if it's a weekend
if mode == "demo" and datetime.now().isoweekday() in range(5, 8):
timeout = time.time() + 10
while not self.elCss(path['alert-box']):
if time.time() > timeout:
logger.warning("weekend trading alert-box not closed")
break
if self.elCss(path['alert-box']):
self.css1(path['alert-box']).click()
logger.debug("weekend trading alert-box closed")
except Exception as e:
logger.critical("login failed")
raise exceptions.BaseExc(e)
return True
def logout(self):
"""logout func (quit browser)"""
try:
self.browser.quit()
except Exception:
raise exceptions.BrowserException(self.brow_name, "not started")
return False
self.vbro.stop()
logger.info("logged out")
return True
def get_bottom_info(self, info):
accepted_values = {
'free_funds': 'equity-free',
'account_value': 'equity-total',
'live_result': 'equity-ppl',
'used_margin': 'equity-margin'}
try:
info_label = accepted_values[info]
val = self.css1("div#%s span.equity-item-value" % info_label).text
return num(val)
except KeyError as e:
raise exceptions.BaseExc(e)
def get_price(self, name):
soup = BeautifulSoup(
self.css1("div.scrollable-area-content").html, "html.parser")
for product in soup.select("div.tradebox"):
fullname = product.select("span.instrument-name")[0].text.lower()
if name.lower() in fullname:
mark_closed_list = [x for x in product.select(
"div.quantity-list-input-wrapper") if x.select(
"div.placeholder")[0].text.lower().find("close") != -1]
if mark_closed_list:
sell_price = product.select("div.tradebox-price-sell")[0]\
.text
return float(sell_price)
else:
return False
class MovementWindow(object):
"""add movement window"""
def __init__(self, api, product):
self.api = api
self.product = product
self.state = 'initialized'
self.insfu = False
def open(self, name_counter=None):
"""open the window"""
if self.api.css1(path['add-mov']).visible:
self.api.css1(path['add-mov']).click()
else:
self.api.css1('span.dataTable-no-data-action').click()
logger.debug("opened window")
self.api.css1(path['search-box']).fill(self.product)
if self.get_result(0) is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(self.product)
result, product = self.search_res(self.product, name_counter)
result.click()
if self.api.elCss("div.widget_message"):
self.decode(self.api.css1("div.widget_message"))
self.product = product
self.state = 'open'
def _check_open(self):
if self.state == 'open':
return True
else:
raise exceptions.WindowException()
def close(self):
"""close a movement"""
self._check_open()
self.api.css1(path['close']).click()
self.state = 'closed'
logger.debug("closed window")
def confirm(self):
"""confirm the movement"""
self._check_open()
self.get_price()
self.api.css1(path['confirm-btn']).click()
widg = self.api.css("div.widget_message")
if widg:
self.decode(widg[0])
raise exceptions.WidgetException(widg)
if all(x for x in ['quantity', 'mode'] if hasattr(self, x)):
self.api.movements.append(Movement(
self.product, self.quantity, self.mode, self.price))
logger.debug("%s movement appended to the list" % self.product)
self.state = 'conclused'
logger.debug("confirmed movement")
def search_res(self, res, check_counter=None):
"""search for a res"""
logger.debug("searching result")
result = self.get_result(0)
name = self.get_research_name(result)
x = 0
while not self.check_name(res, name, counter=check_counter):
name = self.get_research_name(self.get_result(x))
if name is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(res)
logger.debug(name)
if self.check_name(res, name, counter=check_counter):
return self.get_result(x)
x += 1
logger.debug("found product at position %d" % (x + 1))
return result, name
def check_name(self, name, string, counter=None):
"""if both in string return False"""
name = name.lower()
string = string.lower()
if counter is None:
if name in string:
return True
else:
return False
counter = counter.lower()
if name in string and counter in string:
logger.debug("check_name: counter found in string")
return False
elif name in string and counter not in string:
return True
else:
return False
def get_research_name(self, res):
"""return result name"""
if res is None:
return None
return self.api.css1("span.instrument-name", res).text
def get_result(self, pos):
"""get pos result, where 0 is first"""
evalxpath = path['res'] + f"[{pos + 1}]"
try:
res = self.api.xpath(evalxpath)[0]
return res
except Exception:
return None
def set_limit(self, category, mode, value):
"""set limit in movement window"""
self._check_open()
if (mode not in ["unit", "value"] or category
not in ["gain", "loss", "both"]):
raise ValueError()
if not hasattr(self, 'stop_limit'):
self.stop_limit = {'gain': {}, 'loss': {}}
logger.debug("initialized stop_limit")
if category == 'gain':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
elif category == 'loss':
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
if category != 'both':
self.stop_limit[category]['mode'] = mode
self.stop_limit[category]['value'] = value
elif category == 'both':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
for cat in ['gain', 'loss']:
self.stop_limit[cat]['mode'] = mode
self.stop_limit[cat]['value'] = value
logger.debug("set limit")
def decode(self, message):
"""decode text pop-up"""
title = self.api.css1("div.title", message).text
text = self.api.css1("div.text", message).text
if title == "Insufficient Funds":
self.insfu = True
elif title == "Maximum Quantity Limit":
raise exceptions.MaxQuantLimit(num(text))
elif title == "Minimum Quantity Limit":
raise exceptions.MinQuantLimit(num(text))
logger.debug("decoded message")
def decode_update(self, message, value, mult=0.1):
"""decode and update the value"""
try:
msg_text = self.api.css1("div.text", message).text
return num(msg_text)
except Exception:
if msg_text.lower().find("higher") != -1:
value += value * mult
return value
else:
self.decode(message)
return None
def get_mov_margin(self):
"""get the margin of the movement"""
self._check_open()
return num(self.api.css1("span.cfd-order-info-item-value").text)
def set_mode(self, mode):
"""set mode (buy or sell)"""
self._check_open()
if mode not in ["buy", "sell"]:
raise ValueError()
self.api.css1(path[mode + '-btn']).click()
self.mode = mode
logger.debug("mode set")
def get_quantity(self):
"""gte current quantity"""
self._check_open()
quant = int(num(self.api.css1(path['quantity']).value))
self.quantity = quant
return quant
def set_quantity(self, quant):
"""set quantity"""
self._check_open()
self.api.css1(path['quantity']).fill(str(int(quant)))
self.quantity = quant
logger.debug("quantity set")
def get_price(self, mode='buy'):
"""get current price"""
if mode not in ['buy', 'sell']:
raise ValueError()
self._check_open()
price = num(self.api.css1(
"div.orderdialog div.tradebox-price-%s" % mode).text)
self.price = price
return price
def get_unit_value(self):
"""get unit value of stock based on margin, memoized"""
# find in the collection
try:
unit_value = Glob().theCollector.collection['unit_value']
unit_value_res = unit_value[self.product]
logger.debug("unit_value found in the collection")
return unit_value_res
except KeyError:
logger.debug("unit_value not found in the collection")
pip = get_pip(mov=self)
quant = 1 / pip
if hasattr(self, 'quantity'):
old_quant == self.quantity
self.set_quantity(quant)
# update the site
time.sleep(0.5)
margin = self.get_mov_margin()
logger.debug(f"quant: {quant} - pip: {pip} - margin: {margin}")
if 'old_quant' in locals():
self.set_quantity(old_quant)
unit_val = margin / quant
self.unit_value = unit_val
Glob().unit_valueHandler.add_val({self.product: unit_val})
return unit_val
def new_mov(self, name):
"""factory method pattern"""
return self.MovementWindow(self, name)
class Position(PurePosition):
"""position object"""
def __init__(self, api, html_div):
"""initialized from div"""
self.api = api
if isinstance(html_div, type('')):
self.soup_data = BeautifulSoup(html_div, 'html.parser')
else:
self.soup_data = html_div
self.product = self.soup_data.select("td.name")[0].text
self.quantity = num(self.soup_data.select("td.quantity")[0].text)
if ("direction-label-buy" in
self.soup_data.select("td.direction")[0].span['class']):
self.mode = 'buy'
else:
self.mode = 'sell'
self.price = num(self.soup_data.select("td.averagePrice")[0].text)
self.margin = num(self.soup_data.select("td.margin")[0].text)
self.id = self.find_id()
def update(self, soup):
"""update the soup"""
self.soup_data = soup
return soup
def find_id(self):
"""find pos ID with with given data"""
pos_id = self.soup_data['id']
self.id = pos_id
return pos_id
@property
def close_tag(self):
"""obtain close tag"""
return f"#{self.id} div.close-icon"
def close(self):
"""close position via tag"""
self.api.css1(self.close_tag).click()
try:
self.api.xpath(path['ok_but'])[0].click()
except selenium.common.exceptions.ElementNotInteractableException:
if (self.api.css1('.widget_message div.title').text ==
'Market Closed'):
logger.error("market closed, position can't be closed")
raise exceptions.MarketClosed()
raise exceptions.WidgetException(
self.api.css1('.widget_message div.text').text)
# wait until it's been closed
# set a timeout
timeout = time.time() + 10
while self.api.elCss(self.close_tag):
time.sleep(0.1)
if time.time() > timeout:
raise TimeoutError("failed to close pos %s" % self.id)
logger.debug("closed pos %s" % self.id)
def get_gain(self):
"""get current profit"""
gain = num(self.soup_data.select("td.ppl")[0].text)
self.gain = gain
return gain
def bind_mov(self):
"""bind the corresponding movement"""
logger = logging.getLogger("tradingAPI.low_level.bind_mov")
mov_list = [x for x in self.api.movements
if x.product == self.product and
x.quantity == self.quantity and
x.mode == self.mode]
if not mov_list:
logger.debug("fail: mov not found")
return None
else:
logger.debug("success: found movement")
for x in mov_list:
# find approximate price
max_roof = self.price + self.price * 0.01
min_roof = self.price - self.price * 0.01
if min_roof < x.price < max_roof:
logger.debug("success: price corresponding")
# bind mov
self.mov = x
return x
else:
logger.debug("fail: price %f not corresponding to %f" %
(self.price, x.price))
continue
# if nothing, return None
return None
def new_pos(self, html_div):
"""factory method pattern"""
pos = self.Position(self, html_div)
pos.bind_mov()
self.positions.append(pos)
return pos
|
federico123579/Trading212-API | tradingAPI/low_level.py | LowLevelAPI.elCss | python | def elCss(self, css_path, dom=None):
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_css, args=[css_path]) | check if element is present by css | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/low_level.py#L119-L123 | [
"def expect(func, args, times=7, sleep_t=0.5):\n \"\"\"try many times as in times with sleep time\"\"\"\n while times > 0:\n try:\n return func(*args)\n except Exception as e:\n times -= 1\n logger.debug(\"expect failed - attempts left: %d\" % times)\n time.sleep(sleep_t)\n if times == 0:\n raise exceptions.BaseExc(e)\n"
] | class LowLevelAPI(object):
"""low level api to interface with the service"""
def __init__(self, brow="firefox"):
self.brow_name = brow
self.positions = []
self.movements = []
self.stocks = []
# init globals
Glob()
def launch(self):
"""launch browser and virtual display, first of all to be launched"""
try:
# init virtual Display
self.vbro = Display()
self.vbro.start()
logger.debug("virtual display launched")
except Exception:
raise exceptions.VBroException()
try:
self.browser = Browser(self.brow_name)
logger.debug(f"browser {self.brow_name} launched")
except Exception:
raise exceptions.BrowserException(
self.brow_name, "failed to launch")
return True
def css(self, css_path, dom=None):
"""css find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_css, args=[css_path])
def css1(self, css_path, dom=None):
"""return the first value of self.css"""
if dom is None:
dom = self.browser
def _css1(path, domm):
"""virtual local func"""
return self.css(path, domm)[0]
return expect(_css1, args=[css_path, dom])
def search_name(self, name, dom=None):
"""name find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_name, args=[name])
def xpath(self, xpath, dom=None):
"""xpath find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_xpath, args=[xpath])
def elXpath(self, xpath, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_xpath, args=[xpath])
def login(self, username, password, mode="demo"):
"""login function"""
url = "https://trading212.com/it/login"
try:
logger.debug(f"visiting %s" % url)
self.browser.visit(url)
logger.debug(f"connected to %s" % url)
except selenium.common.exceptions.WebDriverException:
logger.critical("connection timed out")
raise
try:
self.search_name("login[username]").fill(username)
self.search_name("login[password]").fill(password)
self.css1(path['log']).click()
# define a timeout for logging in
timeout = time.time() + 30
while not self.elCss(path['logo']):
if time.time() > timeout:
logger.critical("login failed")
raise CredentialsException(username)
time.sleep(1)
logger.info(f"logged in as {username}")
# check if it's a weekend
if mode == "demo" and datetime.now().isoweekday() in range(5, 8):
timeout = time.time() + 10
while not self.elCss(path['alert-box']):
if time.time() > timeout:
logger.warning("weekend trading alert-box not closed")
break
if self.elCss(path['alert-box']):
self.css1(path['alert-box']).click()
logger.debug("weekend trading alert-box closed")
except Exception as e:
logger.critical("login failed")
raise exceptions.BaseExc(e)
return True
def logout(self):
"""logout func (quit browser)"""
try:
self.browser.quit()
except Exception:
raise exceptions.BrowserException(self.brow_name, "not started")
return False
self.vbro.stop()
logger.info("logged out")
return True
def get_bottom_info(self, info):
accepted_values = {
'free_funds': 'equity-free',
'account_value': 'equity-total',
'live_result': 'equity-ppl',
'used_margin': 'equity-margin'}
try:
info_label = accepted_values[info]
val = self.css1("div#%s span.equity-item-value" % info_label).text
return num(val)
except KeyError as e:
raise exceptions.BaseExc(e)
def get_price(self, name):
soup = BeautifulSoup(
self.css1("div.scrollable-area-content").html, "html.parser")
for product in soup.select("div.tradebox"):
fullname = product.select("span.instrument-name")[0].text.lower()
if name.lower() in fullname:
mark_closed_list = [x for x in product.select(
"div.quantity-list-input-wrapper") if x.select(
"div.placeholder")[0].text.lower().find("close") != -1]
if mark_closed_list:
sell_price = product.select("div.tradebox-price-sell")[0]\
.text
return float(sell_price)
else:
return False
class MovementWindow(object):
"""add movement window"""
def __init__(self, api, product):
self.api = api
self.product = product
self.state = 'initialized'
self.insfu = False
def open(self, name_counter=None):
"""open the window"""
if self.api.css1(path['add-mov']).visible:
self.api.css1(path['add-mov']).click()
else:
self.api.css1('span.dataTable-no-data-action').click()
logger.debug("opened window")
self.api.css1(path['search-box']).fill(self.product)
if self.get_result(0) is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(self.product)
result, product = self.search_res(self.product, name_counter)
result.click()
if self.api.elCss("div.widget_message"):
self.decode(self.api.css1("div.widget_message"))
self.product = product
self.state = 'open'
def _check_open(self):
if self.state == 'open':
return True
else:
raise exceptions.WindowException()
def close(self):
"""close a movement"""
self._check_open()
self.api.css1(path['close']).click()
self.state = 'closed'
logger.debug("closed window")
def confirm(self):
"""confirm the movement"""
self._check_open()
self.get_price()
self.api.css1(path['confirm-btn']).click()
widg = self.api.css("div.widget_message")
if widg:
self.decode(widg[0])
raise exceptions.WidgetException(widg)
if all(x for x in ['quantity', 'mode'] if hasattr(self, x)):
self.api.movements.append(Movement(
self.product, self.quantity, self.mode, self.price))
logger.debug("%s movement appended to the list" % self.product)
self.state = 'conclused'
logger.debug("confirmed movement")
def search_res(self, res, check_counter=None):
"""search for a res"""
logger.debug("searching result")
result = self.get_result(0)
name = self.get_research_name(result)
x = 0
while not self.check_name(res, name, counter=check_counter):
name = self.get_research_name(self.get_result(x))
if name is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(res)
logger.debug(name)
if self.check_name(res, name, counter=check_counter):
return self.get_result(x)
x += 1
logger.debug("found product at position %d" % (x + 1))
return result, name
def check_name(self, name, string, counter=None):
"""if both in string return False"""
name = name.lower()
string = string.lower()
if counter is None:
if name in string:
return True
else:
return False
counter = counter.lower()
if name in string and counter in string:
logger.debug("check_name: counter found in string")
return False
elif name in string and counter not in string:
return True
else:
return False
def get_research_name(self, res):
"""return result name"""
if res is None:
return None
return self.api.css1("span.instrument-name", res).text
def get_result(self, pos):
"""get pos result, where 0 is first"""
evalxpath = path['res'] + f"[{pos + 1}]"
try:
res = self.api.xpath(evalxpath)[0]
return res
except Exception:
return None
def set_limit(self, category, mode, value):
"""set limit in movement window"""
self._check_open()
if (mode not in ["unit", "value"] or category
not in ["gain", "loss", "both"]):
raise ValueError()
if not hasattr(self, 'stop_limit'):
self.stop_limit = {'gain': {}, 'loss': {}}
logger.debug("initialized stop_limit")
if category == 'gain':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
elif category == 'loss':
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
if category != 'both':
self.stop_limit[category]['mode'] = mode
self.stop_limit[category]['value'] = value
elif category == 'both':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
for cat in ['gain', 'loss']:
self.stop_limit[cat]['mode'] = mode
self.stop_limit[cat]['value'] = value
logger.debug("set limit")
def decode(self, message):
"""decode text pop-up"""
title = self.api.css1("div.title", message).text
text = self.api.css1("div.text", message).text
if title == "Insufficient Funds":
self.insfu = True
elif title == "Maximum Quantity Limit":
raise exceptions.MaxQuantLimit(num(text))
elif title == "Minimum Quantity Limit":
raise exceptions.MinQuantLimit(num(text))
logger.debug("decoded message")
def decode_update(self, message, value, mult=0.1):
"""decode and update the value"""
try:
msg_text = self.api.css1("div.text", message).text
return num(msg_text)
except Exception:
if msg_text.lower().find("higher") != -1:
value += value * mult
return value
else:
self.decode(message)
return None
def get_mov_margin(self):
"""get the margin of the movement"""
self._check_open()
return num(self.api.css1("span.cfd-order-info-item-value").text)
def set_mode(self, mode):
"""set mode (buy or sell)"""
self._check_open()
if mode not in ["buy", "sell"]:
raise ValueError()
self.api.css1(path[mode + '-btn']).click()
self.mode = mode
logger.debug("mode set")
def get_quantity(self):
"""gte current quantity"""
self._check_open()
quant = int(num(self.api.css1(path['quantity']).value))
self.quantity = quant
return quant
def set_quantity(self, quant):
"""set quantity"""
self._check_open()
self.api.css1(path['quantity']).fill(str(int(quant)))
self.quantity = quant
logger.debug("quantity set")
def get_price(self, mode='buy'):
"""get current price"""
if mode not in ['buy', 'sell']:
raise ValueError()
self._check_open()
price = num(self.api.css1(
"div.orderdialog div.tradebox-price-%s" % mode).text)
self.price = price
return price
def get_unit_value(self):
"""get unit value of stock based on margin, memoized"""
# find in the collection
try:
unit_value = Glob().theCollector.collection['unit_value']
unit_value_res = unit_value[self.product]
logger.debug("unit_value found in the collection")
return unit_value_res
except KeyError:
logger.debug("unit_value not found in the collection")
pip = get_pip(mov=self)
quant = 1 / pip
if hasattr(self, 'quantity'):
old_quant == self.quantity
self.set_quantity(quant)
# update the site
time.sleep(0.5)
margin = self.get_mov_margin()
logger.debug(f"quant: {quant} - pip: {pip} - margin: {margin}")
if 'old_quant' in locals():
self.set_quantity(old_quant)
unit_val = margin / quant
self.unit_value = unit_val
Glob().unit_valueHandler.add_val({self.product: unit_val})
return unit_val
def new_mov(self, name):
"""factory method pattern"""
return self.MovementWindow(self, name)
class Position(PurePosition):
"""position object"""
def __init__(self, api, html_div):
"""initialized from div"""
self.api = api
if isinstance(html_div, type('')):
self.soup_data = BeautifulSoup(html_div, 'html.parser')
else:
self.soup_data = html_div
self.product = self.soup_data.select("td.name")[0].text
self.quantity = num(self.soup_data.select("td.quantity")[0].text)
if ("direction-label-buy" in
self.soup_data.select("td.direction")[0].span['class']):
self.mode = 'buy'
else:
self.mode = 'sell'
self.price = num(self.soup_data.select("td.averagePrice")[0].text)
self.margin = num(self.soup_data.select("td.margin")[0].text)
self.id = self.find_id()
def update(self, soup):
"""update the soup"""
self.soup_data = soup
return soup
def find_id(self):
"""find pos ID with with given data"""
pos_id = self.soup_data['id']
self.id = pos_id
return pos_id
@property
def close_tag(self):
"""obtain close tag"""
return f"#{self.id} div.close-icon"
def close(self):
"""close position via tag"""
self.api.css1(self.close_tag).click()
try:
self.api.xpath(path['ok_but'])[0].click()
except selenium.common.exceptions.ElementNotInteractableException:
if (self.api.css1('.widget_message div.title').text ==
'Market Closed'):
logger.error("market closed, position can't be closed")
raise exceptions.MarketClosed()
raise exceptions.WidgetException(
self.api.css1('.widget_message div.text').text)
# wait until it's been closed
# set a timeout
timeout = time.time() + 10
while self.api.elCss(self.close_tag):
time.sleep(0.1)
if time.time() > timeout:
raise TimeoutError("failed to close pos %s" % self.id)
logger.debug("closed pos %s" % self.id)
def get_gain(self):
"""get current profit"""
gain = num(self.soup_data.select("td.ppl")[0].text)
self.gain = gain
return gain
def bind_mov(self):
"""bind the corresponding movement"""
logger = logging.getLogger("tradingAPI.low_level.bind_mov")
mov_list = [x for x in self.api.movements
if x.product == self.product and
x.quantity == self.quantity and
x.mode == self.mode]
if not mov_list:
logger.debug("fail: mov not found")
return None
else:
logger.debug("success: found movement")
for x in mov_list:
# find approximate price
max_roof = self.price + self.price * 0.01
min_roof = self.price - self.price * 0.01
if min_roof < x.price < max_roof:
logger.debug("success: price corresponding")
# bind mov
self.mov = x
return x
else:
logger.debug("fail: price %f not corresponding to %f" %
(self.price, x.price))
continue
# if nothing, return None
return None
def new_pos(self, html_div):
"""factory method pattern"""
pos = self.Position(self, html_div)
pos.bind_mov()
self.positions.append(pos)
return pos
|
federico123579/Trading212-API | tradingAPI/low_level.py | LowLevelAPI.elXpath | python | def elXpath(self, xpath, dom=None):
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_xpath, args=[xpath]) | check if element is present by css | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/low_level.py#L125-L129 | [
"def expect(func, args, times=7, sleep_t=0.5):\n \"\"\"try many times as in times with sleep time\"\"\"\n while times > 0:\n try:\n return func(*args)\n except Exception as e:\n times -= 1\n logger.debug(\"expect failed - attempts left: %d\" % times)\n time.sleep(sleep_t)\n if times == 0:\n raise exceptions.BaseExc(e)\n"
] | class LowLevelAPI(object):
"""low level api to interface with the service"""
def __init__(self, brow="firefox"):
self.brow_name = brow
self.positions = []
self.movements = []
self.stocks = []
# init globals
Glob()
def launch(self):
"""launch browser and virtual display, first of all to be launched"""
try:
# init virtual Display
self.vbro = Display()
self.vbro.start()
logger.debug("virtual display launched")
except Exception:
raise exceptions.VBroException()
try:
self.browser = Browser(self.brow_name)
logger.debug(f"browser {self.brow_name} launched")
except Exception:
raise exceptions.BrowserException(
self.brow_name, "failed to launch")
return True
def css(self, css_path, dom=None):
"""css find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_css, args=[css_path])
def css1(self, css_path, dom=None):
"""return the first value of self.css"""
if dom is None:
dom = self.browser
def _css1(path, domm):
"""virtual local func"""
return self.css(path, domm)[0]
return expect(_css1, args=[css_path, dom])
def search_name(self, name, dom=None):
"""name find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_name, args=[name])
def xpath(self, xpath, dom=None):
"""xpath find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_xpath, args=[xpath])
def elCss(self, css_path, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_css, args=[css_path])
def login(self, username, password, mode="demo"):
"""login function"""
url = "https://trading212.com/it/login"
try:
logger.debug(f"visiting %s" % url)
self.browser.visit(url)
logger.debug(f"connected to %s" % url)
except selenium.common.exceptions.WebDriverException:
logger.critical("connection timed out")
raise
try:
self.search_name("login[username]").fill(username)
self.search_name("login[password]").fill(password)
self.css1(path['log']).click()
# define a timeout for logging in
timeout = time.time() + 30
while not self.elCss(path['logo']):
if time.time() > timeout:
logger.critical("login failed")
raise CredentialsException(username)
time.sleep(1)
logger.info(f"logged in as {username}")
# check if it's a weekend
if mode == "demo" and datetime.now().isoweekday() in range(5, 8):
timeout = time.time() + 10
while not self.elCss(path['alert-box']):
if time.time() > timeout:
logger.warning("weekend trading alert-box not closed")
break
if self.elCss(path['alert-box']):
self.css1(path['alert-box']).click()
logger.debug("weekend trading alert-box closed")
except Exception as e:
logger.critical("login failed")
raise exceptions.BaseExc(e)
return True
def logout(self):
"""logout func (quit browser)"""
try:
self.browser.quit()
except Exception:
raise exceptions.BrowserException(self.brow_name, "not started")
return False
self.vbro.stop()
logger.info("logged out")
return True
def get_bottom_info(self, info):
accepted_values = {
'free_funds': 'equity-free',
'account_value': 'equity-total',
'live_result': 'equity-ppl',
'used_margin': 'equity-margin'}
try:
info_label = accepted_values[info]
val = self.css1("div#%s span.equity-item-value" % info_label).text
return num(val)
except KeyError as e:
raise exceptions.BaseExc(e)
def get_price(self, name):
soup = BeautifulSoup(
self.css1("div.scrollable-area-content").html, "html.parser")
for product in soup.select("div.tradebox"):
fullname = product.select("span.instrument-name")[0].text.lower()
if name.lower() in fullname:
mark_closed_list = [x for x in product.select(
"div.quantity-list-input-wrapper") if x.select(
"div.placeholder")[0].text.lower().find("close") != -1]
if mark_closed_list:
sell_price = product.select("div.tradebox-price-sell")[0]\
.text
return float(sell_price)
else:
return False
class MovementWindow(object):
"""add movement window"""
def __init__(self, api, product):
self.api = api
self.product = product
self.state = 'initialized'
self.insfu = False
def open(self, name_counter=None):
"""open the window"""
if self.api.css1(path['add-mov']).visible:
self.api.css1(path['add-mov']).click()
else:
self.api.css1('span.dataTable-no-data-action').click()
logger.debug("opened window")
self.api.css1(path['search-box']).fill(self.product)
if self.get_result(0) is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(self.product)
result, product = self.search_res(self.product, name_counter)
result.click()
if self.api.elCss("div.widget_message"):
self.decode(self.api.css1("div.widget_message"))
self.product = product
self.state = 'open'
def _check_open(self):
if self.state == 'open':
return True
else:
raise exceptions.WindowException()
def close(self):
"""close a movement"""
self._check_open()
self.api.css1(path['close']).click()
self.state = 'closed'
logger.debug("closed window")
def confirm(self):
"""confirm the movement"""
self._check_open()
self.get_price()
self.api.css1(path['confirm-btn']).click()
widg = self.api.css("div.widget_message")
if widg:
self.decode(widg[0])
raise exceptions.WidgetException(widg)
if all(x for x in ['quantity', 'mode'] if hasattr(self, x)):
self.api.movements.append(Movement(
self.product, self.quantity, self.mode, self.price))
logger.debug("%s movement appended to the list" % self.product)
self.state = 'conclused'
logger.debug("confirmed movement")
def search_res(self, res, check_counter=None):
"""search for a res"""
logger.debug("searching result")
result = self.get_result(0)
name = self.get_research_name(result)
x = 0
while not self.check_name(res, name, counter=check_counter):
name = self.get_research_name(self.get_result(x))
if name is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(res)
logger.debug(name)
if self.check_name(res, name, counter=check_counter):
return self.get_result(x)
x += 1
logger.debug("found product at position %d" % (x + 1))
return result, name
def check_name(self, name, string, counter=None):
"""if both in string return False"""
name = name.lower()
string = string.lower()
if counter is None:
if name in string:
return True
else:
return False
counter = counter.lower()
if name in string and counter in string:
logger.debug("check_name: counter found in string")
return False
elif name in string and counter not in string:
return True
else:
return False
def get_research_name(self, res):
"""return result name"""
if res is None:
return None
return self.api.css1("span.instrument-name", res).text
def get_result(self, pos):
"""get pos result, where 0 is first"""
evalxpath = path['res'] + f"[{pos + 1}]"
try:
res = self.api.xpath(evalxpath)[0]
return res
except Exception:
return None
def set_limit(self, category, mode, value):
"""set limit in movement window"""
self._check_open()
if (mode not in ["unit", "value"] or category
not in ["gain", "loss", "both"]):
raise ValueError()
if not hasattr(self, 'stop_limit'):
self.stop_limit = {'gain': {}, 'loss': {}}
logger.debug("initialized stop_limit")
if category == 'gain':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
elif category == 'loss':
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
if category != 'both':
self.stop_limit[category]['mode'] = mode
self.stop_limit[category]['value'] = value
elif category == 'both':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
for cat in ['gain', 'loss']:
self.stop_limit[cat]['mode'] = mode
self.stop_limit[cat]['value'] = value
logger.debug("set limit")
def decode(self, message):
"""decode text pop-up"""
title = self.api.css1("div.title", message).text
text = self.api.css1("div.text", message).text
if title == "Insufficient Funds":
self.insfu = True
elif title == "Maximum Quantity Limit":
raise exceptions.MaxQuantLimit(num(text))
elif title == "Minimum Quantity Limit":
raise exceptions.MinQuantLimit(num(text))
logger.debug("decoded message")
def decode_update(self, message, value, mult=0.1):
"""decode and update the value"""
try:
msg_text = self.api.css1("div.text", message).text
return num(msg_text)
except Exception:
if msg_text.lower().find("higher") != -1:
value += value * mult
return value
else:
self.decode(message)
return None
def get_mov_margin(self):
"""get the margin of the movement"""
self._check_open()
return num(self.api.css1("span.cfd-order-info-item-value").text)
def set_mode(self, mode):
"""set mode (buy or sell)"""
self._check_open()
if mode not in ["buy", "sell"]:
raise ValueError()
self.api.css1(path[mode + '-btn']).click()
self.mode = mode
logger.debug("mode set")
def get_quantity(self):
"""gte current quantity"""
self._check_open()
quant = int(num(self.api.css1(path['quantity']).value))
self.quantity = quant
return quant
def set_quantity(self, quant):
"""set quantity"""
self._check_open()
self.api.css1(path['quantity']).fill(str(int(quant)))
self.quantity = quant
logger.debug("quantity set")
def get_price(self, mode='buy'):
"""get current price"""
if mode not in ['buy', 'sell']:
raise ValueError()
self._check_open()
price = num(self.api.css1(
"div.orderdialog div.tradebox-price-%s" % mode).text)
self.price = price
return price
def get_unit_value(self):
"""get unit value of stock based on margin, memoized"""
# find in the collection
try:
unit_value = Glob().theCollector.collection['unit_value']
unit_value_res = unit_value[self.product]
logger.debug("unit_value found in the collection")
return unit_value_res
except KeyError:
logger.debug("unit_value not found in the collection")
pip = get_pip(mov=self)
quant = 1 / pip
if hasattr(self, 'quantity'):
old_quant == self.quantity
self.set_quantity(quant)
# update the site
time.sleep(0.5)
margin = self.get_mov_margin()
logger.debug(f"quant: {quant} - pip: {pip} - margin: {margin}")
if 'old_quant' in locals():
self.set_quantity(old_quant)
unit_val = margin / quant
self.unit_value = unit_val
Glob().unit_valueHandler.add_val({self.product: unit_val})
return unit_val
def new_mov(self, name):
"""factory method pattern"""
return self.MovementWindow(self, name)
class Position(PurePosition):
"""position object"""
def __init__(self, api, html_div):
"""initialized from div"""
self.api = api
if isinstance(html_div, type('')):
self.soup_data = BeautifulSoup(html_div, 'html.parser')
else:
self.soup_data = html_div
self.product = self.soup_data.select("td.name")[0].text
self.quantity = num(self.soup_data.select("td.quantity")[0].text)
if ("direction-label-buy" in
self.soup_data.select("td.direction")[0].span['class']):
self.mode = 'buy'
else:
self.mode = 'sell'
self.price = num(self.soup_data.select("td.averagePrice")[0].text)
self.margin = num(self.soup_data.select("td.margin")[0].text)
self.id = self.find_id()
def update(self, soup):
"""update the soup"""
self.soup_data = soup
return soup
def find_id(self):
"""find pos ID with with given data"""
pos_id = self.soup_data['id']
self.id = pos_id
return pos_id
@property
def close_tag(self):
"""obtain close tag"""
return f"#{self.id} div.close-icon"
def close(self):
"""close position via tag"""
self.api.css1(self.close_tag).click()
try:
self.api.xpath(path['ok_but'])[0].click()
except selenium.common.exceptions.ElementNotInteractableException:
if (self.api.css1('.widget_message div.title').text ==
'Market Closed'):
logger.error("market closed, position can't be closed")
raise exceptions.MarketClosed()
raise exceptions.WidgetException(
self.api.css1('.widget_message div.text').text)
# wait until it's been closed
# set a timeout
timeout = time.time() + 10
while self.api.elCss(self.close_tag):
time.sleep(0.1)
if time.time() > timeout:
raise TimeoutError("failed to close pos %s" % self.id)
logger.debug("closed pos %s" % self.id)
def get_gain(self):
"""get current profit"""
gain = num(self.soup_data.select("td.ppl")[0].text)
self.gain = gain
return gain
def bind_mov(self):
"""bind the corresponding movement"""
logger = logging.getLogger("tradingAPI.low_level.bind_mov")
mov_list = [x for x in self.api.movements
if x.product == self.product and
x.quantity == self.quantity and
x.mode == self.mode]
if not mov_list:
logger.debug("fail: mov not found")
return None
else:
logger.debug("success: found movement")
for x in mov_list:
# find approximate price
max_roof = self.price + self.price * 0.01
min_roof = self.price - self.price * 0.01
if min_roof < x.price < max_roof:
logger.debug("success: price corresponding")
# bind mov
self.mov = x
return x
else:
logger.debug("fail: price %f not corresponding to %f" %
(self.price, x.price))
continue
# if nothing, return None
return None
def new_pos(self, html_div):
"""factory method pattern"""
pos = self.Position(self, html_div)
pos.bind_mov()
self.positions.append(pos)
return pos
|
federico123579/Trading212-API | tradingAPI/low_level.py | LowLevelAPI.login | python | def login(self, username, password, mode="demo"):
url = "https://trading212.com/it/login"
try:
logger.debug(f"visiting %s" % url)
self.browser.visit(url)
logger.debug(f"connected to %s" % url)
except selenium.common.exceptions.WebDriverException:
logger.critical("connection timed out")
raise
try:
self.search_name("login[username]").fill(username)
self.search_name("login[password]").fill(password)
self.css1(path['log']).click()
# define a timeout for logging in
timeout = time.time() + 30
while not self.elCss(path['logo']):
if time.time() > timeout:
logger.critical("login failed")
raise CredentialsException(username)
time.sleep(1)
logger.info(f"logged in as {username}")
# check if it's a weekend
if mode == "demo" and datetime.now().isoweekday() in range(5, 8):
timeout = time.time() + 10
while not self.elCss(path['alert-box']):
if time.time() > timeout:
logger.warning("weekend trading alert-box not closed")
break
if self.elCss(path['alert-box']):
self.css1(path['alert-box']).click()
logger.debug("weekend trading alert-box closed")
except Exception as e:
logger.critical("login failed")
raise exceptions.BaseExc(e)
return True | login function | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/low_level.py#L131-L166 | [
"def css1(self, css_path, dom=None):\n \"\"\"return the first value of self.css\"\"\"\n if dom is None:\n dom = self.browser\n\n def _css1(path, domm):\n \"\"\"virtual local func\"\"\"\n return self.css(path, domm)[0]\n\n return expect(_css1, args=[css_path, dom])\n",
"def search_name(self, name, dom=None):\n \"\"\"name find function abbreviation\"\"\"\n if dom is None:\n dom = self.browser\n return expect(dom.find_by_name, args=[name])\n",
"def elCss(self, css_path, dom=None):\n \"\"\"check if element is present by css\"\"\"\n if dom is None:\n dom = self.browser\n return expect(dom.is_element_present_by_css, args=[css_path])\n"
] | class LowLevelAPI(object):
"""low level api to interface with the service"""
def __init__(self, brow="firefox"):
self.brow_name = brow
self.positions = []
self.movements = []
self.stocks = []
# init globals
Glob()
def launch(self):
"""launch browser and virtual display, first of all to be launched"""
try:
# init virtual Display
self.vbro = Display()
self.vbro.start()
logger.debug("virtual display launched")
except Exception:
raise exceptions.VBroException()
try:
self.browser = Browser(self.brow_name)
logger.debug(f"browser {self.brow_name} launched")
except Exception:
raise exceptions.BrowserException(
self.brow_name, "failed to launch")
return True
def css(self, css_path, dom=None):
"""css find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_css, args=[css_path])
def css1(self, css_path, dom=None):
"""return the first value of self.css"""
if dom is None:
dom = self.browser
def _css1(path, domm):
"""virtual local func"""
return self.css(path, domm)[0]
return expect(_css1, args=[css_path, dom])
def search_name(self, name, dom=None):
"""name find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_name, args=[name])
def xpath(self, xpath, dom=None):
"""xpath find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_xpath, args=[xpath])
def elCss(self, css_path, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_css, args=[css_path])
def elXpath(self, xpath, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_xpath, args=[xpath])
def logout(self):
"""logout func (quit browser)"""
try:
self.browser.quit()
except Exception:
raise exceptions.BrowserException(self.brow_name, "not started")
return False
self.vbro.stop()
logger.info("logged out")
return True
def get_bottom_info(self, info):
accepted_values = {
'free_funds': 'equity-free',
'account_value': 'equity-total',
'live_result': 'equity-ppl',
'used_margin': 'equity-margin'}
try:
info_label = accepted_values[info]
val = self.css1("div#%s span.equity-item-value" % info_label).text
return num(val)
except KeyError as e:
raise exceptions.BaseExc(e)
def get_price(self, name):
soup = BeautifulSoup(
self.css1("div.scrollable-area-content").html, "html.parser")
for product in soup.select("div.tradebox"):
fullname = product.select("span.instrument-name")[0].text.lower()
if name.lower() in fullname:
mark_closed_list = [x for x in product.select(
"div.quantity-list-input-wrapper") if x.select(
"div.placeholder")[0].text.lower().find("close") != -1]
if mark_closed_list:
sell_price = product.select("div.tradebox-price-sell")[0]\
.text
return float(sell_price)
else:
return False
class MovementWindow(object):
"""add movement window"""
def __init__(self, api, product):
self.api = api
self.product = product
self.state = 'initialized'
self.insfu = False
def open(self, name_counter=None):
"""open the window"""
if self.api.css1(path['add-mov']).visible:
self.api.css1(path['add-mov']).click()
else:
self.api.css1('span.dataTable-no-data-action').click()
logger.debug("opened window")
self.api.css1(path['search-box']).fill(self.product)
if self.get_result(0) is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(self.product)
result, product = self.search_res(self.product, name_counter)
result.click()
if self.api.elCss("div.widget_message"):
self.decode(self.api.css1("div.widget_message"))
self.product = product
self.state = 'open'
def _check_open(self):
if self.state == 'open':
return True
else:
raise exceptions.WindowException()
def close(self):
"""close a movement"""
self._check_open()
self.api.css1(path['close']).click()
self.state = 'closed'
logger.debug("closed window")
def confirm(self):
"""confirm the movement"""
self._check_open()
self.get_price()
self.api.css1(path['confirm-btn']).click()
widg = self.api.css("div.widget_message")
if widg:
self.decode(widg[0])
raise exceptions.WidgetException(widg)
if all(x for x in ['quantity', 'mode'] if hasattr(self, x)):
self.api.movements.append(Movement(
self.product, self.quantity, self.mode, self.price))
logger.debug("%s movement appended to the list" % self.product)
self.state = 'conclused'
logger.debug("confirmed movement")
def search_res(self, res, check_counter=None):
"""search for a res"""
logger.debug("searching result")
result = self.get_result(0)
name = self.get_research_name(result)
x = 0
while not self.check_name(res, name, counter=check_counter):
name = self.get_research_name(self.get_result(x))
if name is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(res)
logger.debug(name)
if self.check_name(res, name, counter=check_counter):
return self.get_result(x)
x += 1
logger.debug("found product at position %d" % (x + 1))
return result, name
def check_name(self, name, string, counter=None):
"""if both in string return False"""
name = name.lower()
string = string.lower()
if counter is None:
if name in string:
return True
else:
return False
counter = counter.lower()
if name in string and counter in string:
logger.debug("check_name: counter found in string")
return False
elif name in string and counter not in string:
return True
else:
return False
def get_research_name(self, res):
"""return result name"""
if res is None:
return None
return self.api.css1("span.instrument-name", res).text
def get_result(self, pos):
"""get pos result, where 0 is first"""
evalxpath = path['res'] + f"[{pos + 1}]"
try:
res = self.api.xpath(evalxpath)[0]
return res
except Exception:
return None
def set_limit(self, category, mode, value):
"""set limit in movement window"""
self._check_open()
if (mode not in ["unit", "value"] or category
not in ["gain", "loss", "both"]):
raise ValueError()
if not hasattr(self, 'stop_limit'):
self.stop_limit = {'gain': {}, 'loss': {}}
logger.debug("initialized stop_limit")
if category == 'gain':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
elif category == 'loss':
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
if category != 'both':
self.stop_limit[category]['mode'] = mode
self.stop_limit[category]['value'] = value
elif category == 'both':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
for cat in ['gain', 'loss']:
self.stop_limit[cat]['mode'] = mode
self.stop_limit[cat]['value'] = value
logger.debug("set limit")
def decode(self, message):
"""decode text pop-up"""
title = self.api.css1("div.title", message).text
text = self.api.css1("div.text", message).text
if title == "Insufficient Funds":
self.insfu = True
elif title == "Maximum Quantity Limit":
raise exceptions.MaxQuantLimit(num(text))
elif title == "Minimum Quantity Limit":
raise exceptions.MinQuantLimit(num(text))
logger.debug("decoded message")
def decode_update(self, message, value, mult=0.1):
"""decode and update the value"""
try:
msg_text = self.api.css1("div.text", message).text
return num(msg_text)
except Exception:
if msg_text.lower().find("higher") != -1:
value += value * mult
return value
else:
self.decode(message)
return None
def get_mov_margin(self):
"""get the margin of the movement"""
self._check_open()
return num(self.api.css1("span.cfd-order-info-item-value").text)
def set_mode(self, mode):
"""set mode (buy or sell)"""
self._check_open()
if mode not in ["buy", "sell"]:
raise ValueError()
self.api.css1(path[mode + '-btn']).click()
self.mode = mode
logger.debug("mode set")
def get_quantity(self):
"""gte current quantity"""
self._check_open()
quant = int(num(self.api.css1(path['quantity']).value))
self.quantity = quant
return quant
def set_quantity(self, quant):
"""set quantity"""
self._check_open()
self.api.css1(path['quantity']).fill(str(int(quant)))
self.quantity = quant
logger.debug("quantity set")
def get_price(self, mode='buy'):
"""get current price"""
if mode not in ['buy', 'sell']:
raise ValueError()
self._check_open()
price = num(self.api.css1(
"div.orderdialog div.tradebox-price-%s" % mode).text)
self.price = price
return price
def get_unit_value(self):
"""get unit value of stock based on margin, memoized"""
# find in the collection
try:
unit_value = Glob().theCollector.collection['unit_value']
unit_value_res = unit_value[self.product]
logger.debug("unit_value found in the collection")
return unit_value_res
except KeyError:
logger.debug("unit_value not found in the collection")
pip = get_pip(mov=self)
quant = 1 / pip
if hasattr(self, 'quantity'):
old_quant == self.quantity
self.set_quantity(quant)
# update the site
time.sleep(0.5)
margin = self.get_mov_margin()
logger.debug(f"quant: {quant} - pip: {pip} - margin: {margin}")
if 'old_quant' in locals():
self.set_quantity(old_quant)
unit_val = margin / quant
self.unit_value = unit_val
Glob().unit_valueHandler.add_val({self.product: unit_val})
return unit_val
def new_mov(self, name):
"""factory method pattern"""
return self.MovementWindow(self, name)
class Position(PurePosition):
"""position object"""
def __init__(self, api, html_div):
"""initialized from div"""
self.api = api
if isinstance(html_div, type('')):
self.soup_data = BeautifulSoup(html_div, 'html.parser')
else:
self.soup_data = html_div
self.product = self.soup_data.select("td.name")[0].text
self.quantity = num(self.soup_data.select("td.quantity")[0].text)
if ("direction-label-buy" in
self.soup_data.select("td.direction")[0].span['class']):
self.mode = 'buy'
else:
self.mode = 'sell'
self.price = num(self.soup_data.select("td.averagePrice")[0].text)
self.margin = num(self.soup_data.select("td.margin")[0].text)
self.id = self.find_id()
def update(self, soup):
"""update the soup"""
self.soup_data = soup
return soup
def find_id(self):
"""find pos ID with with given data"""
pos_id = self.soup_data['id']
self.id = pos_id
return pos_id
@property
def close_tag(self):
"""obtain close tag"""
return f"#{self.id} div.close-icon"
def close(self):
"""close position via tag"""
self.api.css1(self.close_tag).click()
try:
self.api.xpath(path['ok_but'])[0].click()
except selenium.common.exceptions.ElementNotInteractableException:
if (self.api.css1('.widget_message div.title').text ==
'Market Closed'):
logger.error("market closed, position can't be closed")
raise exceptions.MarketClosed()
raise exceptions.WidgetException(
self.api.css1('.widget_message div.text').text)
# wait until it's been closed
# set a timeout
timeout = time.time() + 10
while self.api.elCss(self.close_tag):
time.sleep(0.1)
if time.time() > timeout:
raise TimeoutError("failed to close pos %s" % self.id)
logger.debug("closed pos %s" % self.id)
def get_gain(self):
"""get current profit"""
gain = num(self.soup_data.select("td.ppl")[0].text)
self.gain = gain
return gain
def bind_mov(self):
"""bind the corresponding movement"""
logger = logging.getLogger("tradingAPI.low_level.bind_mov")
mov_list = [x for x in self.api.movements
if x.product == self.product and
x.quantity == self.quantity and
x.mode == self.mode]
if not mov_list:
logger.debug("fail: mov not found")
return None
else:
logger.debug("success: found movement")
for x in mov_list:
# find approximate price
max_roof = self.price + self.price * 0.01
min_roof = self.price - self.price * 0.01
if min_roof < x.price < max_roof:
logger.debug("success: price corresponding")
# bind mov
self.mov = x
return x
else:
logger.debug("fail: price %f not corresponding to %f" %
(self.price, x.price))
continue
# if nothing, return None
return None
def new_pos(self, html_div):
"""factory method pattern"""
pos = self.Position(self, html_div)
pos.bind_mov()
self.positions.append(pos)
return pos
|
federico123579/Trading212-API | tradingAPI/low_level.py | LowLevelAPI.logout | python | def logout(self):
try:
self.browser.quit()
except Exception:
raise exceptions.BrowserException(self.brow_name, "not started")
return False
self.vbro.stop()
logger.info("logged out")
return True | logout func (quit browser) | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/low_level.py#L168-L177 | null | class LowLevelAPI(object):
"""low level api to interface with the service"""
def __init__(self, brow="firefox"):
self.brow_name = brow
self.positions = []
self.movements = []
self.stocks = []
# init globals
Glob()
def launch(self):
"""launch browser and virtual display, first of all to be launched"""
try:
# init virtual Display
self.vbro = Display()
self.vbro.start()
logger.debug("virtual display launched")
except Exception:
raise exceptions.VBroException()
try:
self.browser = Browser(self.brow_name)
logger.debug(f"browser {self.brow_name} launched")
except Exception:
raise exceptions.BrowserException(
self.brow_name, "failed to launch")
return True
def css(self, css_path, dom=None):
"""css find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_css, args=[css_path])
def css1(self, css_path, dom=None):
"""return the first value of self.css"""
if dom is None:
dom = self.browser
def _css1(path, domm):
"""virtual local func"""
return self.css(path, domm)[0]
return expect(_css1, args=[css_path, dom])
def search_name(self, name, dom=None):
"""name find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_name, args=[name])
def xpath(self, xpath, dom=None):
"""xpath find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_xpath, args=[xpath])
def elCss(self, css_path, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_css, args=[css_path])
def elXpath(self, xpath, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_xpath, args=[xpath])
def login(self, username, password, mode="demo"):
"""login function"""
url = "https://trading212.com/it/login"
try:
logger.debug(f"visiting %s" % url)
self.browser.visit(url)
logger.debug(f"connected to %s" % url)
except selenium.common.exceptions.WebDriverException:
logger.critical("connection timed out")
raise
try:
self.search_name("login[username]").fill(username)
self.search_name("login[password]").fill(password)
self.css1(path['log']).click()
# define a timeout for logging in
timeout = time.time() + 30
while not self.elCss(path['logo']):
if time.time() > timeout:
logger.critical("login failed")
raise CredentialsException(username)
time.sleep(1)
logger.info(f"logged in as {username}")
# check if it's a weekend
if mode == "demo" and datetime.now().isoweekday() in range(5, 8):
timeout = time.time() + 10
while not self.elCss(path['alert-box']):
if time.time() > timeout:
logger.warning("weekend trading alert-box not closed")
break
if self.elCss(path['alert-box']):
self.css1(path['alert-box']).click()
logger.debug("weekend trading alert-box closed")
except Exception as e:
logger.critical("login failed")
raise exceptions.BaseExc(e)
return True
def get_bottom_info(self, info):
accepted_values = {
'free_funds': 'equity-free',
'account_value': 'equity-total',
'live_result': 'equity-ppl',
'used_margin': 'equity-margin'}
try:
info_label = accepted_values[info]
val = self.css1("div#%s span.equity-item-value" % info_label).text
return num(val)
except KeyError as e:
raise exceptions.BaseExc(e)
def get_price(self, name):
soup = BeautifulSoup(
self.css1("div.scrollable-area-content").html, "html.parser")
for product in soup.select("div.tradebox"):
fullname = product.select("span.instrument-name")[0].text.lower()
if name.lower() in fullname:
mark_closed_list = [x for x in product.select(
"div.quantity-list-input-wrapper") if x.select(
"div.placeholder")[0].text.lower().find("close") != -1]
if mark_closed_list:
sell_price = product.select("div.tradebox-price-sell")[0]\
.text
return float(sell_price)
else:
return False
class MovementWindow(object):
"""add movement window"""
def __init__(self, api, product):
self.api = api
self.product = product
self.state = 'initialized'
self.insfu = False
def open(self, name_counter=None):
"""open the window"""
if self.api.css1(path['add-mov']).visible:
self.api.css1(path['add-mov']).click()
else:
self.api.css1('span.dataTable-no-data-action').click()
logger.debug("opened window")
self.api.css1(path['search-box']).fill(self.product)
if self.get_result(0) is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(self.product)
result, product = self.search_res(self.product, name_counter)
result.click()
if self.api.elCss("div.widget_message"):
self.decode(self.api.css1("div.widget_message"))
self.product = product
self.state = 'open'
def _check_open(self):
if self.state == 'open':
return True
else:
raise exceptions.WindowException()
def close(self):
"""close a movement"""
self._check_open()
self.api.css1(path['close']).click()
self.state = 'closed'
logger.debug("closed window")
def confirm(self):
"""confirm the movement"""
self._check_open()
self.get_price()
self.api.css1(path['confirm-btn']).click()
widg = self.api.css("div.widget_message")
if widg:
self.decode(widg[0])
raise exceptions.WidgetException(widg)
if all(x for x in ['quantity', 'mode'] if hasattr(self, x)):
self.api.movements.append(Movement(
self.product, self.quantity, self.mode, self.price))
logger.debug("%s movement appended to the list" % self.product)
self.state = 'conclused'
logger.debug("confirmed movement")
def search_res(self, res, check_counter=None):
"""search for a res"""
logger.debug("searching result")
result = self.get_result(0)
name = self.get_research_name(result)
x = 0
while not self.check_name(res, name, counter=check_counter):
name = self.get_research_name(self.get_result(x))
if name is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(res)
logger.debug(name)
if self.check_name(res, name, counter=check_counter):
return self.get_result(x)
x += 1
logger.debug("found product at position %d" % (x + 1))
return result, name
def check_name(self, name, string, counter=None):
"""if both in string return False"""
name = name.lower()
string = string.lower()
if counter is None:
if name in string:
return True
else:
return False
counter = counter.lower()
if name in string and counter in string:
logger.debug("check_name: counter found in string")
return False
elif name in string and counter not in string:
return True
else:
return False
def get_research_name(self, res):
"""return result name"""
if res is None:
return None
return self.api.css1("span.instrument-name", res).text
def get_result(self, pos):
"""get pos result, where 0 is first"""
evalxpath = path['res'] + f"[{pos + 1}]"
try:
res = self.api.xpath(evalxpath)[0]
return res
except Exception:
return None
def set_limit(self, category, mode, value):
"""set limit in movement window"""
self._check_open()
if (mode not in ["unit", "value"] or category
not in ["gain", "loss", "both"]):
raise ValueError()
if not hasattr(self, 'stop_limit'):
self.stop_limit = {'gain': {}, 'loss': {}}
logger.debug("initialized stop_limit")
if category == 'gain':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
elif category == 'loss':
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
if category != 'both':
self.stop_limit[category]['mode'] = mode
self.stop_limit[category]['value'] = value
elif category == 'both':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
for cat in ['gain', 'loss']:
self.stop_limit[cat]['mode'] = mode
self.stop_limit[cat]['value'] = value
logger.debug("set limit")
def decode(self, message):
"""decode text pop-up"""
title = self.api.css1("div.title", message).text
text = self.api.css1("div.text", message).text
if title == "Insufficient Funds":
self.insfu = True
elif title == "Maximum Quantity Limit":
raise exceptions.MaxQuantLimit(num(text))
elif title == "Minimum Quantity Limit":
raise exceptions.MinQuantLimit(num(text))
logger.debug("decoded message")
def decode_update(self, message, value, mult=0.1):
"""decode and update the value"""
try:
msg_text = self.api.css1("div.text", message).text
return num(msg_text)
except Exception:
if msg_text.lower().find("higher") != -1:
value += value * mult
return value
else:
self.decode(message)
return None
def get_mov_margin(self):
"""get the margin of the movement"""
self._check_open()
return num(self.api.css1("span.cfd-order-info-item-value").text)
def set_mode(self, mode):
"""set mode (buy or sell)"""
self._check_open()
if mode not in ["buy", "sell"]:
raise ValueError()
self.api.css1(path[mode + '-btn']).click()
self.mode = mode
logger.debug("mode set")
def get_quantity(self):
"""gte current quantity"""
self._check_open()
quant = int(num(self.api.css1(path['quantity']).value))
self.quantity = quant
return quant
def set_quantity(self, quant):
"""set quantity"""
self._check_open()
self.api.css1(path['quantity']).fill(str(int(quant)))
self.quantity = quant
logger.debug("quantity set")
def get_price(self, mode='buy'):
"""get current price"""
if mode not in ['buy', 'sell']:
raise ValueError()
self._check_open()
price = num(self.api.css1(
"div.orderdialog div.tradebox-price-%s" % mode).text)
self.price = price
return price
def get_unit_value(self):
"""get unit value of stock based on margin, memoized"""
# find in the collection
try:
unit_value = Glob().theCollector.collection['unit_value']
unit_value_res = unit_value[self.product]
logger.debug("unit_value found in the collection")
return unit_value_res
except KeyError:
logger.debug("unit_value not found in the collection")
pip = get_pip(mov=self)
quant = 1 / pip
if hasattr(self, 'quantity'):
old_quant == self.quantity
self.set_quantity(quant)
# update the site
time.sleep(0.5)
margin = self.get_mov_margin()
logger.debug(f"quant: {quant} - pip: {pip} - margin: {margin}")
if 'old_quant' in locals():
self.set_quantity(old_quant)
unit_val = margin / quant
self.unit_value = unit_val
Glob().unit_valueHandler.add_val({self.product: unit_val})
return unit_val
def new_mov(self, name):
"""factory method pattern"""
return self.MovementWindow(self, name)
class Position(PurePosition):
"""position object"""
def __init__(self, api, html_div):
"""initialized from div"""
self.api = api
if isinstance(html_div, type('')):
self.soup_data = BeautifulSoup(html_div, 'html.parser')
else:
self.soup_data = html_div
self.product = self.soup_data.select("td.name")[0].text
self.quantity = num(self.soup_data.select("td.quantity")[0].text)
if ("direction-label-buy" in
self.soup_data.select("td.direction")[0].span['class']):
self.mode = 'buy'
else:
self.mode = 'sell'
self.price = num(self.soup_data.select("td.averagePrice")[0].text)
self.margin = num(self.soup_data.select("td.margin")[0].text)
self.id = self.find_id()
def update(self, soup):
"""update the soup"""
self.soup_data = soup
return soup
def find_id(self):
"""find pos ID with with given data"""
pos_id = self.soup_data['id']
self.id = pos_id
return pos_id
@property
def close_tag(self):
"""obtain close tag"""
return f"#{self.id} div.close-icon"
def close(self):
"""close position via tag"""
self.api.css1(self.close_tag).click()
try:
self.api.xpath(path['ok_but'])[0].click()
except selenium.common.exceptions.ElementNotInteractableException:
if (self.api.css1('.widget_message div.title').text ==
'Market Closed'):
logger.error("market closed, position can't be closed")
raise exceptions.MarketClosed()
raise exceptions.WidgetException(
self.api.css1('.widget_message div.text').text)
# wait until it's been closed
# set a timeout
timeout = time.time() + 10
while self.api.elCss(self.close_tag):
time.sleep(0.1)
if time.time() > timeout:
raise TimeoutError("failed to close pos %s" % self.id)
logger.debug("closed pos %s" % self.id)
def get_gain(self):
"""get current profit"""
gain = num(self.soup_data.select("td.ppl")[0].text)
self.gain = gain
return gain
def bind_mov(self):
"""bind the corresponding movement"""
logger = logging.getLogger("tradingAPI.low_level.bind_mov")
mov_list = [x for x in self.api.movements
if x.product == self.product and
x.quantity == self.quantity and
x.mode == self.mode]
if not mov_list:
logger.debug("fail: mov not found")
return None
else:
logger.debug("success: found movement")
for x in mov_list:
# find approximate price
max_roof = self.price + self.price * 0.01
min_roof = self.price - self.price * 0.01
if min_roof < x.price < max_roof:
logger.debug("success: price corresponding")
# bind mov
self.mov = x
return x
else:
logger.debug("fail: price %f not corresponding to %f" %
(self.price, x.price))
continue
# if nothing, return None
return None
def new_pos(self, html_div):
"""factory method pattern"""
pos = self.Position(self, html_div)
pos.bind_mov()
self.positions.append(pos)
return pos
|
federico123579/Trading212-API | tradingAPI/low_level.py | LowLevelAPI.new_pos | python | def new_pos(self, html_div):
pos = self.Position(self, html_div)
pos.bind_mov()
self.positions.append(pos)
return pos | factory method pattern | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/low_level.py#L526-L531 | null | class LowLevelAPI(object):
"""low level api to interface with the service"""
def __init__(self, brow="firefox"):
self.brow_name = brow
self.positions = []
self.movements = []
self.stocks = []
# init globals
Glob()
def launch(self):
"""launch browser and virtual display, first of all to be launched"""
try:
# init virtual Display
self.vbro = Display()
self.vbro.start()
logger.debug("virtual display launched")
except Exception:
raise exceptions.VBroException()
try:
self.browser = Browser(self.brow_name)
logger.debug(f"browser {self.brow_name} launched")
except Exception:
raise exceptions.BrowserException(
self.brow_name, "failed to launch")
return True
def css(self, css_path, dom=None):
"""css find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_css, args=[css_path])
def css1(self, css_path, dom=None):
"""return the first value of self.css"""
if dom is None:
dom = self.browser
def _css1(path, domm):
"""virtual local func"""
return self.css(path, domm)[0]
return expect(_css1, args=[css_path, dom])
def search_name(self, name, dom=None):
"""name find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_name, args=[name])
def xpath(self, xpath, dom=None):
"""xpath find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_xpath, args=[xpath])
def elCss(self, css_path, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_css, args=[css_path])
def elXpath(self, xpath, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_xpath, args=[xpath])
def login(self, username, password, mode="demo"):
"""login function"""
url = "https://trading212.com/it/login"
try:
logger.debug(f"visiting %s" % url)
self.browser.visit(url)
logger.debug(f"connected to %s" % url)
except selenium.common.exceptions.WebDriverException:
logger.critical("connection timed out")
raise
try:
self.search_name("login[username]").fill(username)
self.search_name("login[password]").fill(password)
self.css1(path['log']).click()
# define a timeout for logging in
timeout = time.time() + 30
while not self.elCss(path['logo']):
if time.time() > timeout:
logger.critical("login failed")
raise CredentialsException(username)
time.sleep(1)
logger.info(f"logged in as {username}")
# check if it's a weekend
if mode == "demo" and datetime.now().isoweekday() in range(5, 8):
timeout = time.time() + 10
while not self.elCss(path['alert-box']):
if time.time() > timeout:
logger.warning("weekend trading alert-box not closed")
break
if self.elCss(path['alert-box']):
self.css1(path['alert-box']).click()
logger.debug("weekend trading alert-box closed")
except Exception as e:
logger.critical("login failed")
raise exceptions.BaseExc(e)
return True
def logout(self):
"""logout func (quit browser)"""
try:
self.browser.quit()
except Exception:
raise exceptions.BrowserException(self.brow_name, "not started")
return False
self.vbro.stop()
logger.info("logged out")
return True
def get_bottom_info(self, info):
accepted_values = {
'free_funds': 'equity-free',
'account_value': 'equity-total',
'live_result': 'equity-ppl',
'used_margin': 'equity-margin'}
try:
info_label = accepted_values[info]
val = self.css1("div#%s span.equity-item-value" % info_label).text
return num(val)
except KeyError as e:
raise exceptions.BaseExc(e)
def get_price(self, name):
soup = BeautifulSoup(
self.css1("div.scrollable-area-content").html, "html.parser")
for product in soup.select("div.tradebox"):
fullname = product.select("span.instrument-name")[0].text.lower()
if name.lower() in fullname:
mark_closed_list = [x for x in product.select(
"div.quantity-list-input-wrapper") if x.select(
"div.placeholder")[0].text.lower().find("close") != -1]
if mark_closed_list:
sell_price = product.select("div.tradebox-price-sell")[0]\
.text
return float(sell_price)
else:
return False
class MovementWindow(object):
"""add movement window"""
def __init__(self, api, product):
self.api = api
self.product = product
self.state = 'initialized'
self.insfu = False
def open(self, name_counter=None):
"""open the window"""
if self.api.css1(path['add-mov']).visible:
self.api.css1(path['add-mov']).click()
else:
self.api.css1('span.dataTable-no-data-action').click()
logger.debug("opened window")
self.api.css1(path['search-box']).fill(self.product)
if self.get_result(0) is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(self.product)
result, product = self.search_res(self.product, name_counter)
result.click()
if self.api.elCss("div.widget_message"):
self.decode(self.api.css1("div.widget_message"))
self.product = product
self.state = 'open'
def _check_open(self):
if self.state == 'open':
return True
else:
raise exceptions.WindowException()
def close(self):
"""close a movement"""
self._check_open()
self.api.css1(path['close']).click()
self.state = 'closed'
logger.debug("closed window")
def confirm(self):
"""confirm the movement"""
self._check_open()
self.get_price()
self.api.css1(path['confirm-btn']).click()
widg = self.api.css("div.widget_message")
if widg:
self.decode(widg[0])
raise exceptions.WidgetException(widg)
if all(x for x in ['quantity', 'mode'] if hasattr(self, x)):
self.api.movements.append(Movement(
self.product, self.quantity, self.mode, self.price))
logger.debug("%s movement appended to the list" % self.product)
self.state = 'conclused'
logger.debug("confirmed movement")
def search_res(self, res, check_counter=None):
"""search for a res"""
logger.debug("searching result")
result = self.get_result(0)
name = self.get_research_name(result)
x = 0
while not self.check_name(res, name, counter=check_counter):
name = self.get_research_name(self.get_result(x))
if name is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(res)
logger.debug(name)
if self.check_name(res, name, counter=check_counter):
return self.get_result(x)
x += 1
logger.debug("found product at position %d" % (x + 1))
return result, name
def check_name(self, name, string, counter=None):
"""if both in string return False"""
name = name.lower()
string = string.lower()
if counter is None:
if name in string:
return True
else:
return False
counter = counter.lower()
if name in string and counter in string:
logger.debug("check_name: counter found in string")
return False
elif name in string and counter not in string:
return True
else:
return False
def get_research_name(self, res):
"""return result name"""
if res is None:
return None
return self.api.css1("span.instrument-name", res).text
def get_result(self, pos):
"""get pos result, where 0 is first"""
evalxpath = path['res'] + f"[{pos + 1}]"
try:
res = self.api.xpath(evalxpath)[0]
return res
except Exception:
return None
def set_limit(self, category, mode, value):
"""set limit in movement window"""
self._check_open()
if (mode not in ["unit", "value"] or category
not in ["gain", "loss", "both"]):
raise ValueError()
if not hasattr(self, 'stop_limit'):
self.stop_limit = {'gain': {}, 'loss': {}}
logger.debug("initialized stop_limit")
if category == 'gain':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
elif category == 'loss':
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
if category != 'both':
self.stop_limit[category]['mode'] = mode
self.stop_limit[category]['value'] = value
elif category == 'both':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
for cat in ['gain', 'loss']:
self.stop_limit[cat]['mode'] = mode
self.stop_limit[cat]['value'] = value
logger.debug("set limit")
def decode(self, message):
"""decode text pop-up"""
title = self.api.css1("div.title", message).text
text = self.api.css1("div.text", message).text
if title == "Insufficient Funds":
self.insfu = True
elif title == "Maximum Quantity Limit":
raise exceptions.MaxQuantLimit(num(text))
elif title == "Minimum Quantity Limit":
raise exceptions.MinQuantLimit(num(text))
logger.debug("decoded message")
def decode_update(self, message, value, mult=0.1):
"""decode and update the value"""
try:
msg_text = self.api.css1("div.text", message).text
return num(msg_text)
except Exception:
if msg_text.lower().find("higher") != -1:
value += value * mult
return value
else:
self.decode(message)
return None
def get_mov_margin(self):
"""get the margin of the movement"""
self._check_open()
return num(self.api.css1("span.cfd-order-info-item-value").text)
def set_mode(self, mode):
"""set mode (buy or sell)"""
self._check_open()
if mode not in ["buy", "sell"]:
raise ValueError()
self.api.css1(path[mode + '-btn']).click()
self.mode = mode
logger.debug("mode set")
        def get_quantity(self):
            """Get the quantity currently shown in the movement window.

            Caches the value on ``self.quantity`` and returns it as an int.
            Requires the window to be open (``_check_open``).
            """
            self._check_open()
            quant = int(num(self.api.css1(path['quantity']).value))
            self.quantity = quant
            return quant
def set_quantity(self, quant):
"""set quantity"""
self._check_open()
self.api.css1(path['quantity']).fill(str(int(quant)))
self.quantity = quant
logger.debug("quantity set")
def get_price(self, mode='buy'):
"""get current price"""
if mode not in ['buy', 'sell']:
raise ValueError()
self._check_open()
price = num(self.api.css1(
"div.orderdialog div.tradebox-price-%s" % mode).text)
self.price = price
return price
def get_unit_value(self):
"""get unit value of stock based on margin, memoized"""
# find in the collection
try:
unit_value = Glob().theCollector.collection['unit_value']
unit_value_res = unit_value[self.product]
logger.debug("unit_value found in the collection")
return unit_value_res
except KeyError:
logger.debug("unit_value not found in the collection")
pip = get_pip(mov=self)
quant = 1 / pip
if hasattr(self, 'quantity'):
old_quant == self.quantity
self.set_quantity(quant)
# update the site
time.sleep(0.5)
margin = self.get_mov_margin()
logger.debug(f"quant: {quant} - pip: {pip} - margin: {margin}")
if 'old_quant' in locals():
self.set_quantity(old_quant)
unit_val = margin / quant
self.unit_value = unit_val
Glob().unit_valueHandler.add_val({self.product: unit_val})
return unit_val
def new_mov(self, name):
"""factory method pattern"""
return self.MovementWindow(self, name)
class Position(PurePosition):
"""position object"""
def __init__(self, api, html_div):
"""initialized from div"""
self.api = api
if isinstance(html_div, type('')):
self.soup_data = BeautifulSoup(html_div, 'html.parser')
else:
self.soup_data = html_div
self.product = self.soup_data.select("td.name")[0].text
self.quantity = num(self.soup_data.select("td.quantity")[0].text)
if ("direction-label-buy" in
self.soup_data.select("td.direction")[0].span['class']):
self.mode = 'buy'
else:
self.mode = 'sell'
self.price = num(self.soup_data.select("td.averagePrice")[0].text)
self.margin = num(self.soup_data.select("td.margin")[0].text)
self.id = self.find_id()
def update(self, soup):
"""update the soup"""
self.soup_data = soup
return soup
def find_id(self):
"""find pos ID with with given data"""
pos_id = self.soup_data['id']
self.id = pos_id
return pos_id
@property
def close_tag(self):
"""obtain close tag"""
return f"#{self.id} div.close-icon"
def close(self):
"""close position via tag"""
self.api.css1(self.close_tag).click()
try:
self.api.xpath(path['ok_but'])[0].click()
except selenium.common.exceptions.ElementNotInteractableException:
if (self.api.css1('.widget_message div.title').text ==
'Market Closed'):
logger.error("market closed, position can't be closed")
raise exceptions.MarketClosed()
raise exceptions.WidgetException(
self.api.css1('.widget_message div.text').text)
# wait until it's been closed
# set a timeout
timeout = time.time() + 10
while self.api.elCss(self.close_tag):
time.sleep(0.1)
if time.time() > timeout:
raise TimeoutError("failed to close pos %s" % self.id)
logger.debug("closed pos %s" % self.id)
def get_gain(self):
"""get current profit"""
gain = num(self.soup_data.select("td.ppl")[0].text)
self.gain = gain
return gain
def bind_mov(self):
"""bind the corresponding movement"""
logger = logging.getLogger("tradingAPI.low_level.bind_mov")
mov_list = [x for x in self.api.movements
if x.product == self.product and
x.quantity == self.quantity and
x.mode == self.mode]
if not mov_list:
logger.debug("fail: mov not found")
return None
else:
logger.debug("success: found movement")
for x in mov_list:
# find approximate price
max_roof = self.price + self.price * 0.01
min_roof = self.price - self.price * 0.01
if min_roof < x.price < max_roof:
logger.debug("success: price corresponding")
# bind mov
self.mov = x
return x
else:
logger.debug("fail: price %f not corresponding to %f" %
(self.price, x.price))
continue
# if nothing, return None
return None
|
federico123579/Trading212-API | tradingAPI/api.py | API.addMov | python | def addMov(self, product, quantity=None, mode="buy", stop_limit=None,
auto_margin=None, name_counter=None):
# ~ ARGS ~
if (not isinstance(product, type('')) or
(not isinstance(name_counter, type('')) and
name_counter is not None)):
raise ValueError('product and name_counter have to be a string')
if not isinstance(stop_limit, type({})) and stop_limit is not None:
raise ValueError('it has to be a dictionary')
# exclusive args
if quantity is not None and auto_margin is not None:
raise ValueError("quantity and auto_margin are exclusive")
elif quantity is None and auto_margin is None:
raise ValueError("need at least one quantity")
# ~ MAIN ~
# open new window
mov = self.new_mov(product)
mov.open()
mov.set_mode(mode)
# set quantity
if quantity is not None:
mov.set_quantity(quantity)
# for best performance in long times
try:
margin = mov.get_unit_value() * quantity
except TimeoutError:
mov.close()
logger.warning("market closed for %s" % mov.product)
return False
# auto_margin calculate quantity (how simple!)
elif auto_margin is not None:
unit_value = mov.get_unit_value()
mov.set_quantity(auto_margin * unit_value)
margin = auto_margin
# stop limit (how can be so simple!)
if stop_limit is not None:
mov.set_limit('gain', stop_limit['gain'][0], stop_limit['gain'][1])
mov.set_limit('loss', stop_limit['loss'][0], stop_limit['loss'][1])
# confirm
try:
mov.confirm()
except (exceptions.MaxQuantLimit, exceptions.MinQuantLimit) as e:
logger.warning(e.err)
# resolve immediately
mov.set_quantity(e.quant)
mov.confirm()
except Exception:
logger.exception('undefined error in movement confirmation')
mov_logger.info(f"added {mov.product} movement of {mov.quantity} " +
f"with margin of {margin}")
mov_logger.debug(f"stop_limit: {stop_limit}") | main function for placing movements
stop_limit = {'gain': [mode, value], 'loss': [mode, value]} | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/api.py#L20-L72 | [
"def new_mov(self, name):\n \"\"\"factory method pattern\"\"\"\n return self.MovementWindow(self, name)\n"
] | class API(LowLevelAPI):
"""Interface object"""
def __init__(self, brow='firefox'):
super().__init__(brow)
self.preferences = []
self.stocks = []
# return {'margin': margin, 'name': name}
def checkPos(self):
"""check all positions"""
soup = BeautifulSoup(self.css1(path['movs-table']).html, 'html.parser')
poss = []
for label in soup.find_all("tr"):
pos_id = label['id']
# init an empty list
# check if it already exist
pos_list = [x for x in self.positions if x.id == pos_id]
if pos_list:
# and update it
pos = pos_list[0]
pos.update(label)
else:
pos = self.new_pos(label)
pos.get_gain()
poss.append(pos)
# remove old positions
self.positions.clear()
self.positions.extend(poss)
logger.debug("%d positions update" % len(poss))
return self.positions
def checkStock(self):
"""check stocks in preference"""
if not self.preferences:
logger.debug("no preferences")
return None
soup = BeautifulSoup(
self.xpath(path['stock-table'])[0].html, "html.parser")
count = 0
# iterate through product in left panel
for product in soup.select("div.tradebox"):
prod_name = product.select("span.instrument-name")[0].text
stk_name = [x for x in self.preferences
if x.lower() in prod_name.lower()]
if not stk_name:
continue
name = prod_name
if not [x for x in self.stocks if x.product == name]:
self.stocks.append(Stock(name))
stock = [x for x in self.stocks if x.product == name][0]
if 'tradebox-market-closed' in product['class']:
stock.market = False
if not stock.market:
logger.debug("market closed for %s" % stock.product)
continue
sell_price = product.select("div.tradebox-price-sell")[0].text
buy_price = product.select("div.tradebox-price-buy")[0].text
sent = int(product.select(path['sent'])[0].text.strip('%')) / 100
stock.new_rec([sell_price, buy_price, sent])
count += 1
logger.debug(f"added %d stocks" % count)
return self.stocks
def addPrefs(self, prefs=[]):
self.preferences.extend(prefs)
def clearPrefs(self):
"""clear the left panel and preferences"""
self.preferences.clear()
tradebox_num = len(self.css('div.tradebox'))
for i in range(tradebox_num):
self.xpath(path['trade-box'])[0].right_click()
self.css1('div.item-trade-contextmenu-list-remove').click()
logger.info("cleared preferences")
def addPrefs(self, prefs=[]):
"""add preference in self.preferences"""
if len(prefs) == len(self.preferences) == 0:
logger.debug("no preferences")
return None
self.preferences.extend(prefs)
self.css1(path['search-btn']).click()
count = 0
for pref in self.preferences:
self.css1(path['search-pref']).fill(pref)
self.css1(path['pref-icon']).click()
btn = self.css1('div.add-to-watchlist-popup-item .icon-wrapper')
if not self.css1('svg', btn)['class'] is None:
btn.click()
count += 1
# remove window
self.css1(path['pref-icon']).click()
# close finally
self.css1(path['back-btn']).click()
self.css1(path['back-btn']).click()
logger.debug("updated %d preferences" % count)
return self.preferences
|
federico123579/Trading212-API | tradingAPI/api.py | API.checkPos | python | def checkPos(self):
soup = BeautifulSoup(self.css1(path['movs-table']).html, 'html.parser')
poss = []
for label in soup.find_all("tr"):
pos_id = label['id']
# init an empty list
# check if it already exist
pos_list = [x for x in self.positions if x.id == pos_id]
if pos_list:
# and update it
pos = pos_list[0]
pos.update(label)
else:
pos = self.new_pos(label)
pos.get_gain()
poss.append(pos)
# remove old positions
self.positions.clear()
self.positions.extend(poss)
logger.debug("%d positions update" % len(poss))
return self.positions | check all positions | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/api.py#L76-L97 | [
"def css1(self, css_path, dom=None):\n \"\"\"return the first value of self.css\"\"\"\n if dom is None:\n dom = self.browser\n\n def _css1(path, domm):\n \"\"\"virtual local func\"\"\"\n return self.css(path, domm)[0]\n\n return expect(_css1, args=[css_path, dom])\n",
"def new_pos(self, html_div):\n \"\"\"factory method pattern\"\"\"\n pos = self.Position(self, html_div)\n pos.bind_mov()\n self.positions.append(pos)\n return pos\n"
] | class API(LowLevelAPI):
"""Interface object"""
def __init__(self, brow='firefox'):
super().__init__(brow)
self.preferences = []
self.stocks = []
def addMov(self, product, quantity=None, mode="buy", stop_limit=None,
auto_margin=None, name_counter=None):
"""main function for placing movements
stop_limit = {'gain': [mode, value], 'loss': [mode, value]}"""
# ~ ARGS ~
if (not isinstance(product, type('')) or
(not isinstance(name_counter, type('')) and
name_counter is not None)):
raise ValueError('product and name_counter have to be a string')
if not isinstance(stop_limit, type({})) and stop_limit is not None:
raise ValueError('it has to be a dictionary')
# exclusive args
if quantity is not None and auto_margin is not None:
raise ValueError("quantity and auto_margin are exclusive")
elif quantity is None and auto_margin is None:
raise ValueError("need at least one quantity")
# ~ MAIN ~
# open new window
mov = self.new_mov(product)
mov.open()
mov.set_mode(mode)
# set quantity
if quantity is not None:
mov.set_quantity(quantity)
# for best performance in long times
try:
margin = mov.get_unit_value() * quantity
except TimeoutError:
mov.close()
logger.warning("market closed for %s" % mov.product)
return False
# auto_margin calculate quantity (how simple!)
elif auto_margin is not None:
unit_value = mov.get_unit_value()
mov.set_quantity(auto_margin * unit_value)
margin = auto_margin
# stop limit (how can be so simple!)
if stop_limit is not None:
mov.set_limit('gain', stop_limit['gain'][0], stop_limit['gain'][1])
mov.set_limit('loss', stop_limit['loss'][0], stop_limit['loss'][1])
# confirm
try:
mov.confirm()
except (exceptions.MaxQuantLimit, exceptions.MinQuantLimit) as e:
logger.warning(e.err)
# resolve immediately
mov.set_quantity(e.quant)
mov.confirm()
except Exception:
logger.exception('undefined error in movement confirmation')
mov_logger.info(f"added {mov.product} movement of {mov.quantity} " +
f"with margin of {margin}")
mov_logger.debug(f"stop_limit: {stop_limit}")
# return {'margin': margin, 'name': name}
def checkStock(self):
"""check stocks in preference"""
if not self.preferences:
logger.debug("no preferences")
return None
soup = BeautifulSoup(
self.xpath(path['stock-table'])[0].html, "html.parser")
count = 0
# iterate through product in left panel
for product in soup.select("div.tradebox"):
prod_name = product.select("span.instrument-name")[0].text
stk_name = [x for x in self.preferences
if x.lower() in prod_name.lower()]
if not stk_name:
continue
name = prod_name
if not [x for x in self.stocks if x.product == name]:
self.stocks.append(Stock(name))
stock = [x for x in self.stocks if x.product == name][0]
if 'tradebox-market-closed' in product['class']:
stock.market = False
if not stock.market:
logger.debug("market closed for %s" % stock.product)
continue
sell_price = product.select("div.tradebox-price-sell")[0].text
buy_price = product.select("div.tradebox-price-buy")[0].text
sent = int(product.select(path['sent'])[0].text.strip('%')) / 100
stock.new_rec([sell_price, buy_price, sent])
count += 1
logger.debug(f"added %d stocks" % count)
return self.stocks
def addPrefs(self, prefs=[]):
self.preferences.extend(prefs)
def clearPrefs(self):
"""clear the left panel and preferences"""
self.preferences.clear()
tradebox_num = len(self.css('div.tradebox'))
for i in range(tradebox_num):
self.xpath(path['trade-box'])[0].right_click()
self.css1('div.item-trade-contextmenu-list-remove').click()
logger.info("cleared preferences")
def addPrefs(self, prefs=[]):
"""add preference in self.preferences"""
if len(prefs) == len(self.preferences) == 0:
logger.debug("no preferences")
return None
self.preferences.extend(prefs)
self.css1(path['search-btn']).click()
count = 0
for pref in self.preferences:
self.css1(path['search-pref']).fill(pref)
self.css1(path['pref-icon']).click()
btn = self.css1('div.add-to-watchlist-popup-item .icon-wrapper')
if not self.css1('svg', btn)['class'] is None:
btn.click()
count += 1
# remove window
self.css1(path['pref-icon']).click()
# close finally
self.css1(path['back-btn']).click()
self.css1(path['back-btn']).click()
logger.debug("updated %d preferences" % count)
return self.preferences
|
federico123579/Trading212-API | tradingAPI/api.py | API.checkStock | python | def checkStock(self):
if not self.preferences:
logger.debug("no preferences")
return None
soup = BeautifulSoup(
self.xpath(path['stock-table'])[0].html, "html.parser")
count = 0
# iterate through product in left panel
for product in soup.select("div.tradebox"):
prod_name = product.select("span.instrument-name")[0].text
stk_name = [x for x in self.preferences
if x.lower() in prod_name.lower()]
if not stk_name:
continue
name = prod_name
if not [x for x in self.stocks if x.product == name]:
self.stocks.append(Stock(name))
stock = [x for x in self.stocks if x.product == name][0]
if 'tradebox-market-closed' in product['class']:
stock.market = False
if not stock.market:
logger.debug("market closed for %s" % stock.product)
continue
sell_price = product.select("div.tradebox-price-sell")[0].text
buy_price = product.select("div.tradebox-price-buy")[0].text
sent = int(product.select(path['sent'])[0].text.strip('%')) / 100
stock.new_rec([sell_price, buy_price, sent])
count += 1
logger.debug(f"added %d stocks" % count)
return self.stocks | check stocks in preference | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/api.py#L99-L129 | [
"def xpath(self, xpath, dom=None):\n \"\"\"xpath find function abbreviation\"\"\"\n if dom is None:\n dom = self.browser\n return expect(dom.find_by_xpath, args=[xpath])\n"
] | class API(LowLevelAPI):
"""Interface object"""
def __init__(self, brow='firefox'):
super().__init__(brow)
self.preferences = []
self.stocks = []
def addMov(self, product, quantity=None, mode="buy", stop_limit=None,
auto_margin=None, name_counter=None):
"""main function for placing movements
stop_limit = {'gain': [mode, value], 'loss': [mode, value]}"""
# ~ ARGS ~
if (not isinstance(product, type('')) or
(not isinstance(name_counter, type('')) and
name_counter is not None)):
raise ValueError('product and name_counter have to be a string')
if not isinstance(stop_limit, type({})) and stop_limit is not None:
raise ValueError('it has to be a dictionary')
# exclusive args
if quantity is not None and auto_margin is not None:
raise ValueError("quantity and auto_margin are exclusive")
elif quantity is None and auto_margin is None:
raise ValueError("need at least one quantity")
# ~ MAIN ~
# open new window
mov = self.new_mov(product)
mov.open()
mov.set_mode(mode)
# set quantity
if quantity is not None:
mov.set_quantity(quantity)
# for best performance in long times
try:
margin = mov.get_unit_value() * quantity
except TimeoutError:
mov.close()
logger.warning("market closed for %s" % mov.product)
return False
# auto_margin calculate quantity (how simple!)
elif auto_margin is not None:
unit_value = mov.get_unit_value()
mov.set_quantity(auto_margin * unit_value)
margin = auto_margin
# stop limit (how can be so simple!)
if stop_limit is not None:
mov.set_limit('gain', stop_limit['gain'][0], stop_limit['gain'][1])
mov.set_limit('loss', stop_limit['loss'][0], stop_limit['loss'][1])
# confirm
try:
mov.confirm()
except (exceptions.MaxQuantLimit, exceptions.MinQuantLimit) as e:
logger.warning(e.err)
# resolve immediately
mov.set_quantity(e.quant)
mov.confirm()
except Exception:
logger.exception('undefined error in movement confirmation')
mov_logger.info(f"added {mov.product} movement of {mov.quantity} " +
f"with margin of {margin}")
mov_logger.debug(f"stop_limit: {stop_limit}")
# return {'margin': margin, 'name': name}
def checkPos(self):
"""check all positions"""
soup = BeautifulSoup(self.css1(path['movs-table']).html, 'html.parser')
poss = []
for label in soup.find_all("tr"):
pos_id = label['id']
# init an empty list
# check if it already exist
pos_list = [x for x in self.positions if x.id == pos_id]
if pos_list:
# and update it
pos = pos_list[0]
pos.update(label)
else:
pos = self.new_pos(label)
pos.get_gain()
poss.append(pos)
# remove old positions
self.positions.clear()
self.positions.extend(poss)
logger.debug("%d positions update" % len(poss))
return self.positions
def addPrefs(self, prefs=[]):
self.preferences.extend(prefs)
def clearPrefs(self):
"""clear the left panel and preferences"""
self.preferences.clear()
tradebox_num = len(self.css('div.tradebox'))
for i in range(tradebox_num):
self.xpath(path['trade-box'])[0].right_click()
self.css1('div.item-trade-contextmenu-list-remove').click()
logger.info("cleared preferences")
def addPrefs(self, prefs=[]):
"""add preference in self.preferences"""
if len(prefs) == len(self.preferences) == 0:
logger.debug("no preferences")
return None
self.preferences.extend(prefs)
self.css1(path['search-btn']).click()
count = 0
for pref in self.preferences:
self.css1(path['search-pref']).fill(pref)
self.css1(path['pref-icon']).click()
btn = self.css1('div.add-to-watchlist-popup-item .icon-wrapper')
if not self.css1('svg', btn)['class'] is None:
btn.click()
count += 1
# remove window
self.css1(path['pref-icon']).click()
# close finally
self.css1(path['back-btn']).click()
self.css1(path['back-btn']).click()
logger.debug("updated %d preferences" % count)
return self.preferences
|
federico123579/Trading212-API | tradingAPI/api.py | API.clearPrefs | python | def clearPrefs(self):
self.preferences.clear()
tradebox_num = len(self.css('div.tradebox'))
for i in range(tradebox_num):
self.xpath(path['trade-box'])[0].right_click()
self.css1('div.item-trade-contextmenu-list-remove').click()
logger.info("cleared preferences") | clear the left panel and preferences | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/api.py#L134-L141 | [
"def css(self, css_path, dom=None):\n \"\"\"css find function abbreviation\"\"\"\n if dom is None:\n dom = self.browser\n return expect(dom.find_by_css, args=[css_path])\n",
"def css1(self, css_path, dom=None):\n \"\"\"return the first value of self.css\"\"\"\n if dom is None:\n dom = self.browser\n\n def _css1(path, domm):\n \"\"\"virtual local func\"\"\"\n return self.css(path, domm)[0]\n\n return expect(_css1, args=[css_path, dom])\n",
"def xpath(self, xpath, dom=None):\n \"\"\"xpath find function abbreviation\"\"\"\n if dom is None:\n dom = self.browser\n return expect(dom.find_by_xpath, args=[xpath])\n"
] | class API(LowLevelAPI):
"""Interface object"""
def __init__(self, brow='firefox'):
super().__init__(brow)
self.preferences = []
self.stocks = []
def addMov(self, product, quantity=None, mode="buy", stop_limit=None,
auto_margin=None, name_counter=None):
"""main function for placing movements
stop_limit = {'gain': [mode, value], 'loss': [mode, value]}"""
# ~ ARGS ~
if (not isinstance(product, type('')) or
(not isinstance(name_counter, type('')) and
name_counter is not None)):
raise ValueError('product and name_counter have to be a string')
if not isinstance(stop_limit, type({})) and stop_limit is not None:
raise ValueError('it has to be a dictionary')
# exclusive args
if quantity is not None and auto_margin is not None:
raise ValueError("quantity and auto_margin are exclusive")
elif quantity is None and auto_margin is None:
raise ValueError("need at least one quantity")
# ~ MAIN ~
# open new window
mov = self.new_mov(product)
mov.open()
mov.set_mode(mode)
# set quantity
if quantity is not None:
mov.set_quantity(quantity)
# for best performance in long times
try:
margin = mov.get_unit_value() * quantity
except TimeoutError:
mov.close()
logger.warning("market closed for %s" % mov.product)
return False
# auto_margin calculate quantity (how simple!)
elif auto_margin is not None:
unit_value = mov.get_unit_value()
mov.set_quantity(auto_margin * unit_value)
margin = auto_margin
# stop limit (how can be so simple!)
if stop_limit is not None:
mov.set_limit('gain', stop_limit['gain'][0], stop_limit['gain'][1])
mov.set_limit('loss', stop_limit['loss'][0], stop_limit['loss'][1])
# confirm
try:
mov.confirm()
except (exceptions.MaxQuantLimit, exceptions.MinQuantLimit) as e:
logger.warning(e.err)
# resolve immediately
mov.set_quantity(e.quant)
mov.confirm()
except Exception:
logger.exception('undefined error in movement confirmation')
mov_logger.info(f"added {mov.product} movement of {mov.quantity} " +
f"with margin of {margin}")
mov_logger.debug(f"stop_limit: {stop_limit}")
# return {'margin': margin, 'name': name}
def checkPos(self):
"""check all positions"""
soup = BeautifulSoup(self.css1(path['movs-table']).html, 'html.parser')
poss = []
for label in soup.find_all("tr"):
pos_id = label['id']
# init an empty list
# check if it already exist
pos_list = [x for x in self.positions if x.id == pos_id]
if pos_list:
# and update it
pos = pos_list[0]
pos.update(label)
else:
pos = self.new_pos(label)
pos.get_gain()
poss.append(pos)
# remove old positions
self.positions.clear()
self.positions.extend(poss)
logger.debug("%d positions update" % len(poss))
return self.positions
def checkStock(self):
"""check stocks in preference"""
if not self.preferences:
logger.debug("no preferences")
return None
soup = BeautifulSoup(
self.xpath(path['stock-table'])[0].html, "html.parser")
count = 0
# iterate through product in left panel
for product in soup.select("div.tradebox"):
prod_name = product.select("span.instrument-name")[0].text
stk_name = [x for x in self.preferences
if x.lower() in prod_name.lower()]
if not stk_name:
continue
name = prod_name
if not [x for x in self.stocks if x.product == name]:
self.stocks.append(Stock(name))
stock = [x for x in self.stocks if x.product == name][0]
if 'tradebox-market-closed' in product['class']:
stock.market = False
if not stock.market:
logger.debug("market closed for %s" % stock.product)
continue
sell_price = product.select("div.tradebox-price-sell")[0].text
buy_price = product.select("div.tradebox-price-buy")[0].text
sent = int(product.select(path['sent'])[0].text.strip('%')) / 100
stock.new_rec([sell_price, buy_price, sent])
count += 1
logger.debug(f"added %d stocks" % count)
return self.stocks
def addPrefs(self, prefs=[]):
self.preferences.extend(prefs)
def addPrefs(self, prefs=[]):
"""add preference in self.preferences"""
if len(prefs) == len(self.preferences) == 0:
logger.debug("no preferences")
return None
self.preferences.extend(prefs)
self.css1(path['search-btn']).click()
count = 0
for pref in self.preferences:
self.css1(path['search-pref']).fill(pref)
self.css1(path['pref-icon']).click()
btn = self.css1('div.add-to-watchlist-popup-item .icon-wrapper')
if not self.css1('svg', btn)['class'] is None:
btn.click()
count += 1
# remove window
self.css1(path['pref-icon']).click()
# close finally
self.css1(path['back-btn']).click()
self.css1(path['back-btn']).click()
logger.debug("updated %d preferences" % count)
return self.preferences
|
federico123579/Trading212-API | tradingAPI/api.py | API.addPrefs | python | def addPrefs(self, prefs=[]):
if len(prefs) == len(self.preferences) == 0:
logger.debug("no preferences")
return None
self.preferences.extend(prefs)
self.css1(path['search-btn']).click()
count = 0
for pref in self.preferences:
self.css1(path['search-pref']).fill(pref)
self.css1(path['pref-icon']).click()
btn = self.css1('div.add-to-watchlist-popup-item .icon-wrapper')
if not self.css1('svg', btn)['class'] is None:
btn.click()
count += 1
# remove window
self.css1(path['pref-icon']).click()
# close finally
self.css1(path['back-btn']).click()
self.css1(path['back-btn']).click()
logger.debug("updated %d preferences" % count)
return self.preferences | add preference in self.preferences | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/api.py#L143-L164 | [
"def css1(self, css_path, dom=None):\n \"\"\"return the first value of self.css\"\"\"\n if dom is None:\n dom = self.browser\n\n def _css1(path, domm):\n \"\"\"virtual local func\"\"\"\n return self.css(path, domm)[0]\n\n return expect(_css1, args=[css_path, dom])\n"
] | class API(LowLevelAPI):
"""Interface object"""
def __init__(self, brow='firefox'):
super().__init__(brow)
self.preferences = []
self.stocks = []
def addMov(self, product, quantity=None, mode="buy", stop_limit=None,
auto_margin=None, name_counter=None):
"""main function for placing movements
stop_limit = {'gain': [mode, value], 'loss': [mode, value]}"""
# ~ ARGS ~
if (not isinstance(product, type('')) or
(not isinstance(name_counter, type('')) and
name_counter is not None)):
raise ValueError('product and name_counter have to be a string')
if not isinstance(stop_limit, type({})) and stop_limit is not None:
raise ValueError('it has to be a dictionary')
# exclusive args
if quantity is not None and auto_margin is not None:
raise ValueError("quantity and auto_margin are exclusive")
elif quantity is None and auto_margin is None:
raise ValueError("need at least one quantity")
# ~ MAIN ~
# open new window
mov = self.new_mov(product)
mov.open()
mov.set_mode(mode)
# set quantity
if quantity is not None:
mov.set_quantity(quantity)
# for best performance in long times
try:
margin = mov.get_unit_value() * quantity
except TimeoutError:
mov.close()
logger.warning("market closed for %s" % mov.product)
return False
# auto_margin calculate quantity (how simple!)
elif auto_margin is not None:
unit_value = mov.get_unit_value()
mov.set_quantity(auto_margin * unit_value)
margin = auto_margin
# stop limit (how can be so simple!)
if stop_limit is not None:
mov.set_limit('gain', stop_limit['gain'][0], stop_limit['gain'][1])
mov.set_limit('loss', stop_limit['loss'][0], stop_limit['loss'][1])
# confirm
try:
mov.confirm()
except (exceptions.MaxQuantLimit, exceptions.MinQuantLimit) as e:
logger.warning(e.err)
# resolve immediately
mov.set_quantity(e.quant)
mov.confirm()
except Exception:
logger.exception('undefined error in movement confirmation')
mov_logger.info(f"added {mov.product} movement of {mov.quantity} " +
f"with margin of {margin}")
mov_logger.debug(f"stop_limit: {stop_limit}")
# return {'margin': margin, 'name': name}
def checkPos(self):
"""check all positions"""
soup = BeautifulSoup(self.css1(path['movs-table']).html, 'html.parser')
poss = []
for label in soup.find_all("tr"):
pos_id = label['id']
# init an empty list
# check if it already exist
pos_list = [x for x in self.positions if x.id == pos_id]
if pos_list:
# and update it
pos = pos_list[0]
pos.update(label)
else:
pos = self.new_pos(label)
pos.get_gain()
poss.append(pos)
# remove old positions
self.positions.clear()
self.positions.extend(poss)
logger.debug("%d positions update" % len(poss))
return self.positions
def checkStock(self):
"""check stocks in preference"""
if not self.preferences:
logger.debug("no preferences")
return None
soup = BeautifulSoup(
self.xpath(path['stock-table'])[0].html, "html.parser")
count = 0
# iterate through product in left panel
for product in soup.select("div.tradebox"):
prod_name = product.select("span.instrument-name")[0].text
stk_name = [x for x in self.preferences
if x.lower() in prod_name.lower()]
if not stk_name:
continue
name = prod_name
if not [x for x in self.stocks if x.product == name]:
self.stocks.append(Stock(name))
stock = [x for x in self.stocks if x.product == name][0]
if 'tradebox-market-closed' in product['class']:
stock.market = False
if not stock.market:
logger.debug("market closed for %s" % stock.product)
continue
sell_price = product.select("div.tradebox-price-sell")[0].text
buy_price = product.select("div.tradebox-price-buy")[0].text
sent = int(product.select(path['sent'])[0].text.strip('%')) / 100
stock.new_rec([sell_price, buy_price, sent])
count += 1
logger.debug(f"added %d stocks" % count)
return self.stocks
def addPrefs(self, prefs=[]):
self.preferences.extend(prefs)
def clearPrefs(self):
"""clear the left panel and preferences"""
self.preferences.clear()
tradebox_num = len(self.css('div.tradebox'))
for i in range(tradebox_num):
self.xpath(path['trade-box'])[0].right_click()
self.css1('div.item-trade-contextmenu-list-remove').click()
logger.info("cleared preferences")
|
federico123579/Trading212-API | tradingAPI/utils.py | expect | python | def expect(func, args, times=7, sleep_t=0.5):
while times > 0:
try:
return func(*args)
except Exception as e:
times -= 1
logger.debug("expect failed - attempts left: %d" % times)
time.sleep(sleep_t)
if times == 0:
raise exceptions.BaseExc(e) | try many times as in times with sleep time | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/utils.py#L20-L30 | [
"def _css1(path, domm):\n \"\"\"virtual local func\"\"\"\n return self.css(path, domm)[0]\n"
] | # -*- coding: utf-8 -*-
"""
tradingAPI.utils
~~~~~~~~~~~~~~
This module provides utility functions.
"""
import time
import re
from tradingAPI import exceptions
from .glob import Glob
# logging
import logging
logger = logging.getLogger('tradingAPI.utils')
def num(string):
"""convert a string to float"""
if not isinstance(string, type('')):
raise ValueError(type(''))
try:
string = re.sub('[^a-zA-Z0-9\.\-]', '', string)
number = re.findall(r"[-+]?\d*\.\d+|[-+]?\d+", string)
return float(number[0])
except Exception as e:
logger = logging.getLogger('tradingAPI.utils.num')
logger.debug("number not found in %s" % string)
logger.debug(e)
return None
def get_number_unit(number):
"""get the unit of number"""
n = str(float(number))
mult, submult = n.split('.')
if float(submult) != 0:
unit = '0.' + (len(submult)-1)*'0' + '1'
return float(unit)
else:
return float(1)
def get_pip(mov=None, api=None, name=None):
"""get value of pip"""
# ~ check args
if mov is None and api is None:
logger.error("need at least one of those")
raise ValueError()
elif mov is not None and api is not None:
logger.error("mov and api are exclusive")
raise ValueError()
if api is not None:
if name is None:
logger.error("need a name")
raise ValueError()
mov = api.new_mov(name)
mov.open()
if mov is not None:
mov._check_open()
# find in the collection
try:
logger.debug(len(Glob().theCollector.collection))
pip = Glob().theCollector.collection['pip']
if name is not None:
pip_res = pip[name]
elif mov is not None:
pip_res = pip[mov.product]
logger.debug("pip found in the collection")
return pip_res
except KeyError:
logger.debug("pip not found in the collection")
# ~ vars
records = []
intervals = [10, 20, 30]
def _check_price(interval=10):
timeout = time.time() + interval
while time.time() < timeout:
records.append(mov.get_price())
time.sleep(0.5)
# find variation
for interval in intervals:
_check_price(interval)
if min(records) == max(records):
logger.debug("no variation in %d seconds" % interval)
if interval == intervals[-1]:
raise TimeoutError("no variation")
else:
break
# find longer price
for price in records:
if 'best_price' not in locals():
best_price = price
if len(str(price)) > len(str(best_price)):
logger.debug("found new best_price %f" % price)
best_price = price
# get pip
pip = get_number_unit(best_price)
Glob().pipHandler.add_val({mov.product: pip})
return pip
|
federico123579/Trading212-API | tradingAPI/utils.py | num | python | def num(string):
if not isinstance(string, type('')):
raise ValueError(type(''))
try:
string = re.sub('[^a-zA-Z0-9\.\-]', '', string)
number = re.findall(r"[-+]?\d*\.\d+|[-+]?\d+", string)
return float(number[0])
except Exception as e:
logger = logging.getLogger('tradingAPI.utils.num')
logger.debug("number not found in %s" % string)
logger.debug(e)
return None | convert a string to float | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/utils.py#L33-L45 | null | # -*- coding: utf-8 -*-
"""
tradingAPI.utils
~~~~~~~~~~~~~~
This module provides utility functions.
"""
import time
import re
from tradingAPI import exceptions
from .glob import Glob
# logging
import logging
logger = logging.getLogger('tradingAPI.utils')
def expect(func, args, times=7, sleep_t=0.5):
"""try many times as in times with sleep time"""
while times > 0:
try:
return func(*args)
except Exception as e:
times -= 1
logger.debug("expect failed - attempts left: %d" % times)
time.sleep(sleep_t)
if times == 0:
raise exceptions.BaseExc(e)
def get_number_unit(number):
"""get the unit of number"""
n = str(float(number))
mult, submult = n.split('.')
if float(submult) != 0:
unit = '0.' + (len(submult)-1)*'0' + '1'
return float(unit)
else:
return float(1)
def get_pip(mov=None, api=None, name=None):
"""get value of pip"""
# ~ check args
if mov is None and api is None:
logger.error("need at least one of those")
raise ValueError()
elif mov is not None and api is not None:
logger.error("mov and api are exclusive")
raise ValueError()
if api is not None:
if name is None:
logger.error("need a name")
raise ValueError()
mov = api.new_mov(name)
mov.open()
if mov is not None:
mov._check_open()
# find in the collection
try:
logger.debug(len(Glob().theCollector.collection))
pip = Glob().theCollector.collection['pip']
if name is not None:
pip_res = pip[name]
elif mov is not None:
pip_res = pip[mov.product]
logger.debug("pip found in the collection")
return pip_res
except KeyError:
logger.debug("pip not found in the collection")
# ~ vars
records = []
intervals = [10, 20, 30]
def _check_price(interval=10):
timeout = time.time() + interval
while time.time() < timeout:
records.append(mov.get_price())
time.sleep(0.5)
# find variation
for interval in intervals:
_check_price(interval)
if min(records) == max(records):
logger.debug("no variation in %d seconds" % interval)
if interval == intervals[-1]:
raise TimeoutError("no variation")
else:
break
# find longer price
for price in records:
if 'best_price' not in locals():
best_price = price
if len(str(price)) > len(str(best_price)):
logger.debug("found new best_price %f" % price)
best_price = price
# get pip
pip = get_number_unit(best_price)
Glob().pipHandler.add_val({mov.product: pip})
return pip
|
federico123579/Trading212-API | tradingAPI/utils.py | get_number_unit | python | def get_number_unit(number):
n = str(float(number))
mult, submult = n.split('.')
if float(submult) != 0:
unit = '0.' + (len(submult)-1)*'0' + '1'
return float(unit)
else:
return float(1) | get the unit of number | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/utils.py#L48-L56 | null | # -*- coding: utf-8 -*-
"""
tradingAPI.utils
~~~~~~~~~~~~~~
This module provides utility functions.
"""
import time
import re
from tradingAPI import exceptions
from .glob import Glob
# logging
import logging
logger = logging.getLogger('tradingAPI.utils')
def expect(func, args, times=7, sleep_t=0.5):
"""try many times as in times with sleep time"""
while times > 0:
try:
return func(*args)
except Exception as e:
times -= 1
logger.debug("expect failed - attempts left: %d" % times)
time.sleep(sleep_t)
if times == 0:
raise exceptions.BaseExc(e)
def num(string):
"""convert a string to float"""
if not isinstance(string, type('')):
raise ValueError(type(''))
try:
string = re.sub('[^a-zA-Z0-9\.\-]', '', string)
number = re.findall(r"[-+]?\d*\.\d+|[-+]?\d+", string)
return float(number[0])
except Exception as e:
logger = logging.getLogger('tradingAPI.utils.num')
logger.debug("number not found in %s" % string)
logger.debug(e)
return None
def get_pip(mov=None, api=None, name=None):
"""get value of pip"""
# ~ check args
if mov is None and api is None:
logger.error("need at least one of those")
raise ValueError()
elif mov is not None and api is not None:
logger.error("mov and api are exclusive")
raise ValueError()
if api is not None:
if name is None:
logger.error("need a name")
raise ValueError()
mov = api.new_mov(name)
mov.open()
if mov is not None:
mov._check_open()
# find in the collection
try:
logger.debug(len(Glob().theCollector.collection))
pip = Glob().theCollector.collection['pip']
if name is not None:
pip_res = pip[name]
elif mov is not None:
pip_res = pip[mov.product]
logger.debug("pip found in the collection")
return pip_res
except KeyError:
logger.debug("pip not found in the collection")
# ~ vars
records = []
intervals = [10, 20, 30]
def _check_price(interval=10):
timeout = time.time() + interval
while time.time() < timeout:
records.append(mov.get_price())
time.sleep(0.5)
# find variation
for interval in intervals:
_check_price(interval)
if min(records) == max(records):
logger.debug("no variation in %d seconds" % interval)
if interval == intervals[-1]:
raise TimeoutError("no variation")
else:
break
# find longer price
for price in records:
if 'best_price' not in locals():
best_price = price
if len(str(price)) > len(str(best_price)):
logger.debug("found new best_price %f" % price)
best_price = price
# get pip
pip = get_number_unit(best_price)
Glob().pipHandler.add_val({mov.product: pip})
return pip
|
federico123579/Trading212-API | tradingAPI/utils.py | get_pip | python | def get_pip(mov=None, api=None, name=None):
# ~ check args
if mov is None and api is None:
logger.error("need at least one of those")
raise ValueError()
elif mov is not None and api is not None:
logger.error("mov and api are exclusive")
raise ValueError()
if api is not None:
if name is None:
logger.error("need a name")
raise ValueError()
mov = api.new_mov(name)
mov.open()
if mov is not None:
mov._check_open()
# find in the collection
try:
logger.debug(len(Glob().theCollector.collection))
pip = Glob().theCollector.collection['pip']
if name is not None:
pip_res = pip[name]
elif mov is not None:
pip_res = pip[mov.product]
logger.debug("pip found in the collection")
return pip_res
except KeyError:
logger.debug("pip not found in the collection")
# ~ vars
records = []
intervals = [10, 20, 30]
def _check_price(interval=10):
timeout = time.time() + interval
while time.time() < timeout:
records.append(mov.get_price())
time.sleep(0.5)
# find variation
for interval in intervals:
_check_price(interval)
if min(records) == max(records):
logger.debug("no variation in %d seconds" % interval)
if interval == intervals[-1]:
raise TimeoutError("no variation")
else:
break
# find longer price
for price in records:
if 'best_price' not in locals():
best_price = price
if len(str(price)) > len(str(best_price)):
logger.debug("found new best_price %f" % price)
best_price = price
# get pip
pip = get_number_unit(best_price)
Glob().pipHandler.add_val({mov.product: pip})
return pip | get value of pip | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/utils.py#L59-L117 | [
"def get_number_unit(number):\n \"\"\"get the unit of number\"\"\"\n n = str(float(number))\n mult, submult = n.split('.')\n if float(submult) != 0:\n unit = '0.' + (len(submult)-1)*'0' + '1'\n return float(unit)\n else:\n return float(1)\n",
"def _check_price(interval=10):\n timeout = time.time() + interval\n while time.time() < timeout:\n records.append(mov.get_price())\n time.sleep(0.5)\n",
"def _check_open(self):\n if self.state == 'open':\n return True\n else:\n raise exceptions.WindowException()\n"
] | # -*- coding: utf-8 -*-
"""
tradingAPI.utils
~~~~~~~~~~~~~~
This module provides utility functions.
"""
import time
import re
from tradingAPI import exceptions
from .glob import Glob
# logging
import logging
logger = logging.getLogger('tradingAPI.utils')
def expect(func, args, times=7, sleep_t=0.5):
"""try many times as in times with sleep time"""
while times > 0:
try:
return func(*args)
except Exception as e:
times -= 1
logger.debug("expect failed - attempts left: %d" % times)
time.sleep(sleep_t)
if times == 0:
raise exceptions.BaseExc(e)
def num(string):
"""convert a string to float"""
if not isinstance(string, type('')):
raise ValueError(type(''))
try:
string = re.sub('[^a-zA-Z0-9\.\-]', '', string)
number = re.findall(r"[-+]?\d*\.\d+|[-+]?\d+", string)
return float(number[0])
except Exception as e:
logger = logging.getLogger('tradingAPI.utils.num')
logger.debug("number not found in %s" % string)
logger.debug(e)
return None
def get_number_unit(number):
"""get the unit of number"""
n = str(float(number))
mult, submult = n.split('.')
if float(submult) != 0:
unit = '0.' + (len(submult)-1)*'0' + '1'
return float(unit)
else:
return float(1)
|
federico123579/Trading212-API | tradingAPI/saver.py | Saver.add_val | python | def add_val(self, val):
if not isinstance(val, type({})):
raise ValueError(type({}))
self.read()
self.config.update(val)
self.save() | add value in form of dict | train | https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/saver.py#L55-L61 | [
"def read(self):\n self.checkFile()\n with open(self.config_file, 'r') as f:\n yaml_dict = yaml.load(f)\n logger.debug('yaml: ' + str(yaml_dict))\n if yaml_dict is not None:\n self.config = yaml_dict\n self.notify_observers(event='update', data=self.config)\n return self.config\n",
"def save(self):\n self.checkFile()\n if not self.config:\n logger.error(\"nothing to save (config not exists)\")\n raise NotImplemented()\n with open(self.config_file, 'w') as f:\n f.write(yaml.dump(self.config))\n logger.debug(\"saved data\")\n"
] | class Saver(Observable):
"""save data"""
def __init__(self, path, name):
self._observers = []
self.name = name
self.config_file = path
self.config = {}
def read(self):
self.checkFile()
with open(self.config_file, 'r') as f:
yaml_dict = yaml.load(f)
logger.debug('yaml: ' + str(yaml_dict))
if yaml_dict is not None:
self.config = yaml_dict
self.notify_observers(event='update', data=self.config)
return self.config
def save(self):
self.checkFile()
if not self.config:
logger.error("nothing to save (config not exists)")
raise NotImplemented()
with open(self.config_file, 'w') as f:
f.write(yaml.dump(self.config))
logger.debug("saved data")
def checkFile(self):
if not os.path.isfile(self.config_file):
directory = os.path.dirname(self.config_file)
if not os.path.exists(directory):
os.makedirs(directory)
with open(self.config_file, 'w') as f:
pass
|
Tygs/ww | src/ww/wrappers/lists.py | ListWrapper.join | python | def join(self, joiner, formatter=lambda s, t: t.format(s),
template="{}"):
return ww.s(joiner).join(self, formatter, template) | Join values and convert to string
Example:
>>> from ww import l
>>> lst = l('012')
>>> lst.join(',')
u'0,1,2'
>>> lst.join(',', template="{}#")
u'0#,1#,2#'
>>> string = lst.join(',',\
formatter = lambda x, y: str(int(x) ** 2))
>>> string
u'0,1,4' | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/wrappers/lists.py#L33-L51 | null | class ListWrapper(list):
@property
def len(self):
"""Return object length
Example:
>>> from ww import l
>>> lst = l([0, 1, 2, 3])
>>> lst.len
4
"""
return len(self)
def append(self, *values):
"""Append values at the end of the list
Allow chaining.
Args:
values: values to be appened at the end.
Example:
>>> from ww import l
>>> lst = l([])
>>> lst.append(1)
[1]
>>> lst
[1]
>>> lst.append(2, 3).append(4,5)
[1, 2, 3, 4, 5]
>>> lst
[1, 2, 3, 4, 5]
"""
for value in values:
list.append(self, value)
return self
def extend(self, *iterables):
"""Add all values of all iterables at the end of the list
Args:
iterables: iterable which content to add at the end
Example:
>>> from ww import l
>>> lst = l([])
>>> lst.extend([1, 2])
[1, 2]
>>> lst
[1, 2]
>>> lst.extend([3, 4]).extend([5, 6])
[1, 2, 3, 4, 5, 6]
>>> lst
[1, 2, 3, 4, 5, 6]
"""
for value in iterables:
list.extend(self, value)
return self
|
Tygs/ww | src/ww/wrappers/lists.py | ListWrapper.append | python | def append(self, *values):
for value in values:
list.append(self, value)
return self | Append values at the end of the list
Allow chaining.
Args:
values: values to be appened at the end.
Example:
>>> from ww import l
>>> lst = l([])
>>> lst.append(1)
[1]
>>> lst
[1]
>>> lst.append(2, 3).append(4,5)
[1, 2, 3, 4, 5]
>>> lst
[1, 2, 3, 4, 5] | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/wrappers/lists.py#L53-L77 | null | class ListWrapper(list):
@property
def len(self):
"""Return object length
Example:
>>> from ww import l
>>> lst = l([0, 1, 2, 3])
>>> lst.len
4
"""
return len(self)
def join(self, joiner, formatter=lambda s, t: t.format(s),
template="{}"):
"""Join values and convert to string
Example:
>>> from ww import l
>>> lst = l('012')
>>> lst.join(',')
u'0,1,2'
>>> lst.join(',', template="{}#")
u'0#,1#,2#'
>>> string = lst.join(',',\
formatter = lambda x, y: str(int(x) ** 2))
>>> string
u'0,1,4'
"""
return ww.s(joiner).join(self, formatter, template)
def extend(self, *iterables):
"""Add all values of all iterables at the end of the list
Args:
iterables: iterable which content to add at the end
Example:
>>> from ww import l
>>> lst = l([])
>>> lst.extend([1, 2])
[1, 2]
>>> lst
[1, 2]
>>> lst.extend([3, 4]).extend([5, 6])
[1, 2, 3, 4, 5, 6]
>>> lst
[1, 2, 3, 4, 5, 6]
"""
for value in iterables:
list.extend(self, value)
return self
|
Tygs/ww | src/ww/wrappers/lists.py | ListWrapper.extend | python | def extend(self, *iterables):
for value in iterables:
list.extend(self, value)
return self | Add all values of all iterables at the end of the list
Args:
iterables: iterable which content to add at the end
Example:
>>> from ww import l
>>> lst = l([])
>>> lst.extend([1, 2])
[1, 2]
>>> lst
[1, 2]
>>> lst.extend([3, 4]).extend([5, 6])
[1, 2, 3, 4, 5, 6]
>>> lst
[1, 2, 3, 4, 5, 6] | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/wrappers/lists.py#L79-L101 | null | class ListWrapper(list):
@property
def len(self):
"""Return object length
Example:
>>> from ww import l
>>> lst = l([0, 1, 2, 3])
>>> lst.len
4
"""
return len(self)
def join(self, joiner, formatter=lambda s, t: t.format(s),
template="{}"):
"""Join values and convert to string
Example:
>>> from ww import l
>>> lst = l('012')
>>> lst.join(',')
u'0,1,2'
>>> lst.join(',', template="{}#")
u'0#,1#,2#'
>>> string = lst.join(',',\
formatter = lambda x, y: str(int(x) ** 2))
>>> string
u'0,1,4'
"""
return ww.s(joiner).join(self, formatter, template)
def append(self, *values):
"""Append values at the end of the list
Allow chaining.
Args:
values: values to be appened at the end.
Example:
>>> from ww import l
>>> lst = l([])
>>> lst.append(1)
[1]
>>> lst
[1]
>>> lst.append(2, 3).append(4,5)
[1, 2, 3, 4, 5]
>>> lst
[1, 2, 3, 4, 5]
"""
for value in values:
list.append(self, value)
return self
|
Tygs/ww | src/ww/tools/iterables.py | starts_when | python | def starts_when(iterable, condition):
# type: (Iterable, Union[Callable, Any]) -> Iterable
if not callable(condition):
cond_value = condition
def condition(x):
return x == cond_value
return itertools.dropwhile(lambda x: not condition(x), iterable) | Start yielding items when a condition arise.
Args:
iterable: the iterable to filter.
condition: if the callable returns True once, start yielding
items. If it's not a callable, it will be converted
to one as `lambda condition: condition == item`.
Example:
>>> list(starts_when(range(10), lambda x: x > 5))
[6, 7, 8, 9]
>>> list(starts_when(range(10), 7))
[7, 8, 9] | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/tools/iterables.py#L49-L71 | null | # coding: utf-8
"""
:doc:`g() </iterable_wrapper>` is very convenient, but it's only a
thin wrapper on top of the tools from this module.
So if you want to apply some of the goodies from it without having to
turn your iterables into IterableWrapper objects, you can use the functions
from this module directly.
Example:
>>> from ww.tools.iterables import chunks # same as g().chunks()
>>> list(chunks(range(10), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]
You'll find bellow the detailed documentation for each functions. Remember
they all take an iterable as input, and most often ouput a generator.
Go have a look, there is some great stuff here!
"""
from __future__ import division, absolute_import, print_function
import itertools
from future.utils import raise_from
import ww
from ww.types import Union, Callable, Iterable, Any, T # noqa
from ww.utils import renamed_argument
from collections import deque
# TODO: implement all https://docs.python.org/3/library/itertools.html
# which means backports and receipes
# TODO: cycle, but accept a max repeat
# TODO: filter() but:
# if an iterable is first element, lambda x: x in first_element
# if an iterable is a non callable scalare,
# lambda x: x == first_element
# a 3rd param to take an Exception or a list of exception to ignore so you can
# filter out stuff raisin exceptions
# TODO: map, but a 3rd param to take an Exception or a list of exception
# to ignore so you can filter out stuff raisin exceptions
def stops_when(iterable, condition):
# type: (Iterable, Union[Callable, Any]) -> Iterable
"""Stop yielding items when a condition arise.
Args:
iterable: the iterable to filter.
condition: if the callable returns True once, stop yielding
items. If it's not a callable, it will be converted
to one as `lambda condition: condition == item`.
Example:
>>> list(stops_when(range(10), lambda x: x > 5))
[0, 1, 2, 3, 4, 5]
>>> list(stops_when(range(10), 7))
[0, 1, 2, 3, 4, 5, 6]
"""
if not callable(condition):
cond_value = condition
def condition(x):
return x == cond_value
return itertools.takewhile(lambda x: not condition(x), iterable)
def skip_duplicates(iterable, key=None, fingerprints=()):
# type: (Iterable, Callable, Any) -> Iterable
"""
Returns a generator that will yield all objects from iterable, skipping
duplicates.
Duplicates are identified using the `key` function to calculate a
unique fingerprint. This does not use natural equality, but the
result use a set() to remove duplicates, so defining __eq__
on your objects would have no effect.
By default the fingerprint is the object itself,
which ensure the functions works as-is with an iterable of primitives
such as int, str or tuple.
:Example:
>>> list(skip_duplicates([1, 2, 3, 4, 4, 2, 1, 3 , 4]))
[1, 2, 3, 4]
The return value of `key` MUST be hashable, which means for
non hashable objects such as dict, set or list, you need to specify
a a function that returns a hashable fingerprint.
:Example:
>>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
... lambda x: tuple(x)))
[[], [1, 2]]
>>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
... lambda x: (type(x), tuple(x))))
[[], (), [1, 2], (1, 2)]
For more complex types, such as custom classes, the default behavior
is to remove nothing. You MUST provide a `key` function is you wish
to filter those.
:Example:
>>> class Test(object):
... def __init__(self, foo='bar'):
... self.foo = foo
... def __repr__(self):
... return "Test('%s')" % self.foo
...
>>> list(skip_duplicates([Test(), Test(), Test('other')]))
[Test('bar'), Test('bar'), Test('other')]
>>> list(skip_duplicates([Test(), Test(), Test('other')],\
lambda x: x.foo))
[Test('bar'), Test('other')]
"""
fingerprints = fingerprints or set()
fingerprint = None # needed on type errors unrelated to hashing
try:
# duplicate some code to gain perf in the most common case
if key is None:
for x in iterable:
if x not in fingerprints:
yield x
fingerprints.add(x)
else:
for x in iterable:
fingerprint = key(x)
if fingerprint not in fingerprints:
yield x
fingerprints.add(fingerprint)
except TypeError:
try:
hash(fingerprint)
except TypeError:
raise TypeError(
"The 'key' function returned a non hashable object of type "
"'%s' when receiving '%s'. Make sure this function always "
"returns a hashable object. Hint: immutable primitives like"
"int, str or tuple, are hashable while dict, set and list are "
"not." % (type(fingerprint), x))
else:
raise
# TODO: test that on big iterators to check for recursion limit
def chunks(iterable, chunksize, cast=tuple):
# type: (Iterable, int, Callable) -> Iterable
"""
Yields items from an iterator in iterable chunks.
"""
it = iter(iterable)
while True:
yield cast(itertools.chain([next(it)],
itertools.islice(it, chunksize - 1)))
def window(iterable, size=2, cast=tuple):
# type: (Iterable, int, Callable) -> Iterable
"""
Yields iterms by bunch of a given size, but rolling only one item
in and out at a time when iterating.
>>> list(window([1, 2, 3]))
[(1, 2), (2, 3)]
By default, this will cast the window to a tuple before yielding it;
however, any function that will accept an iterable as its argument
is a valid target.
If you pass None as a cast value, the deque will be returned as-is,
which is more performant. However, since only one deque is used
for the entire iteration, you'll get the same reference everytime,
only the deque will contains different items. The result might not
be what you want :
>>> list(window([1, 2, 3], cast=None))
[deque([2, 3], maxlen=2), deque([2, 3], maxlen=2)]
"""
iterable = iter(iterable)
d = deque(itertools.islice(iterable, size), size)
if cast:
yield cast(d)
for x in iterable:
d.append(x)
yield cast(d)
else:
yield d
for x in iterable:
d.append(x)
yield d
def at_index(iterable, index):
# type: (Iterable[T], int) -> T
"""" Return the item at the index of this iterable or raises IndexError.
WARNING: this will consume generators.
Negative indices are allowed but be aware they will cause n items to
be held in memory, where n = abs(index)
"""
try:
if index < 0:
return deque(iterable, maxlen=abs(index)).popleft()
return next(itertools.islice(iterable, index, index + 1))
except (StopIteration, IndexError) as e:
raise_from(IndexError('Index "%d" out of range' % index), e)
# TODO: accept a default value if not value is found
def first_true(iterable, func):
# type: (Iterable[T], Callable) -> T
"""" Return the first item of the iterable for which func(item) == True.
Or raises IndexError.
WARNING: this will consume generators.
"""
try:
return next((x for x in iterable if func(x)))
except StopIteration as e:
# TODO: Find a better error message
raise_from(IndexError('No match for %s' % func), e)
def iterslice(iterable, start=0, stop=None, step=1):
# type: (Iterable[T], int, int, int) -> Iterable[T]
""" Like itertools.islice, but accept int and callables.
If `start` is a callable, start the slice after the first time
start(item) == True.
If `stop` is a callable, stop the slice after the first time
stop(item) == True.
"""
if step < 0:
raise ValueError("The step can not be negative: '%s' given" % step)
if not isinstance(start, int):
# [Callable:Callable]
if not isinstance(stop, int) and stop:
return stops_when(starts_when(iterable, start), stop)
# [Callable:int]
return starts_when(itertools.islice(iterable, None, stop, step), start)
# [int:Callable]
if not isinstance(stop, int) and stop:
return stops_when(itertools.islice(iterable, start, None, step), stop)
# [int:int]
return itertools.islice(iterable, start, stop, step)
# TODO: allow to disable auto sorting. Document how to make it behave
# like the original groupby
# TODO: allow cast to be None, which set cast to lambda x: x
@renamed_argument('key', 'keyfunc')
def groupby(iterable, keyfunc=None, reverse=False, cast=tuple):
# type: (Iterable, Callable, bool, Callable) -> Iterable
sorted_iterable = sorted(iterable, key=keyfunc, reverse=reverse)
for key, group in itertools.groupby(sorted_iterable, keyfunc):
yield key, cast(group)
# TODO: make the same things than in matrix, where the default value
# can be a callable, a non string iterable, or a value
def firsts(iterable, items=1, default=None):
# type: (Iterable[T], int, T) -> Iterable[T]
""" Lazily return the first x items from this iterable or default. """
try:
items = int(items)
except (ValueError, TypeError):
raise ValueError("items should be usable as an int but is currently "
"'{}' of type '{}'".format(items, type(items)))
# TODO: replace this so that it returns lasts()
if items < 0:
raise ValueError(ww.f("items is {items} but should "
"be greater than 0. If you wish to get the last "
"items, use the lasts() function."))
i = 0
for i, item in zip(range(items), iterable):
yield item
for x in range(items - (i + 1)):
yield default
def lasts(iterable, items=1, default=None):
# type: (Iterable[T], int, T) -> Iterable[T]
""" Lazily return the last x items from this iterable or default. """
last_items = deque(iterable, maxlen=items)
for _ in range(items - len(last_items)):
yield default
for y in last_items:
yield y
# reduce is technically the last value of accumulate
# use ww.utils.EMPTY instead of EMPTY
# Put in the doc than scan=fold=accumulare and reduce=accumulate
# replace https://docs.python.org/3/library/itertools.html#itertools.accumulate
# that works only on Python 3.3 and doesn't have echo_start
# def accumulate(func, iterable, start=ww.utils.EMPTY, *, echo_start=True):
# """
# Scan higher-order function.
# The first 3 positional arguments are alike to the ``functools.reduce``
# signature. This function accepts an extra optional ``echo_start``
# parameter that controls whether the first value should be in the output.
# """
# it = iter(iterable)
# if start is ww.utils._EMPTY:
# start = next(it)
# if echo_start:
# yield start
# for item in it:
# start = func(start, item)
# yield start
|
Tygs/ww | src/ww/tools/iterables.py | stops_when | python | def stops_when(iterable, condition):
# type: (Iterable, Union[Callable, Any]) -> Iterable
if not callable(condition):
cond_value = condition
def condition(x):
return x == cond_value
return itertools.takewhile(lambda x: not condition(x), iterable) | Stop yielding items when a condition arise.
Args:
iterable: the iterable to filter.
condition: if the callable returns True once, stop yielding
items. If it's not a callable, it will be converted
to one as `lambda condition: condition == item`.
Example:
>>> list(stops_when(range(10), lambda x: x > 5))
[0, 1, 2, 3, 4, 5]
>>> list(stops_when(range(10), 7))
[0, 1, 2, 3, 4, 5, 6] | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/tools/iterables.py#L74-L96 | null | # coding: utf-8
"""
:doc:`g() </iterable_wrapper>` is very convenient, but it's only a
thin wrapper on top of the tools from this module.
So if you want to apply some of the goodies from it without having to
turn your iterables into IterableWrapper objects, you can use the functions
from this module directly.
Example:
>>> from ww.tools.iterables import chunks # same as g().chunks()
>>> list(chunks(range(10), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]
You'll find bellow the detailed documentation for each functions. Remember
they all take an iterable as input, and most often ouput a generator.
Go have a look, there is some great stuff here!
"""
from __future__ import division, absolute_import, print_function
import itertools
from future.utils import raise_from
import ww
from ww.types import Union, Callable, Iterable, Any, T # noqa
from ww.utils import renamed_argument
from collections import deque
# TODO: implement all https://docs.python.org/3/library/itertools.html
# which means backports and receipes
# TODO: cycle, but accept a max repeat
# TODO: filter() but:
# if an iterable is first element, lambda x: x in first_element
# if an iterable is a non callable scalare,
# lambda x: x == first_element
# a 3rd param to take an Exception or a list of exception to ignore so you can
# filter out stuff raisin exceptions
# TODO: map, but a 3rd param to take an Exception or a list of exception
# to ignore so you can filter out stuff raisin exceptions
def starts_when(iterable, condition):
# type: (Iterable, Union[Callable, Any]) -> Iterable
"""Start yielding items when a condition arise.
Args:
iterable: the iterable to filter.
condition: if the callable returns True once, start yielding
items. If it's not a callable, it will be converted
to one as `lambda condition: condition == item`.
Example:
>>> list(starts_when(range(10), lambda x: x > 5))
[6, 7, 8, 9]
>>> list(starts_when(range(10), 7))
[7, 8, 9]
"""
if not callable(condition):
cond_value = condition
def condition(x):
return x == cond_value
return itertools.dropwhile(lambda x: not condition(x), iterable)
def skip_duplicates(iterable, key=None, fingerprints=()):
# type: (Iterable, Callable, Any) -> Iterable
"""
Returns a generator that will yield all objects from iterable, skipping
duplicates.
Duplicates are identified using the `key` function to calculate a
unique fingerprint. This does not use natural equality, but the
result use a set() to remove duplicates, so defining __eq__
on your objects would have no effect.
By default the fingerprint is the object itself,
which ensure the functions works as-is with an iterable of primitives
such as int, str or tuple.
:Example:
>>> list(skip_duplicates([1, 2, 3, 4, 4, 2, 1, 3 , 4]))
[1, 2, 3, 4]
The return value of `key` MUST be hashable, which means for
non hashable objects such as dict, set or list, you need to specify
a a function that returns a hashable fingerprint.
:Example:
>>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
... lambda x: tuple(x)))
[[], [1, 2]]
>>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
... lambda x: (type(x), tuple(x))))
[[], (), [1, 2], (1, 2)]
For more complex types, such as custom classes, the default behavior
is to remove nothing. You MUST provide a `key` function is you wish
to filter those.
:Example:
>>> class Test(object):
... def __init__(self, foo='bar'):
... self.foo = foo
... def __repr__(self):
... return "Test('%s')" % self.foo
...
>>> list(skip_duplicates([Test(), Test(), Test('other')]))
[Test('bar'), Test('bar'), Test('other')]
>>> list(skip_duplicates([Test(), Test(), Test('other')],\
lambda x: x.foo))
[Test('bar'), Test('other')]
"""
fingerprints = fingerprints or set()
fingerprint = None # needed on type errors unrelated to hashing
try:
# duplicate some code to gain perf in the most common case
if key is None:
for x in iterable:
if x not in fingerprints:
yield x
fingerprints.add(x)
else:
for x in iterable:
fingerprint = key(x)
if fingerprint not in fingerprints:
yield x
fingerprints.add(fingerprint)
except TypeError:
try:
hash(fingerprint)
except TypeError:
raise TypeError(
"The 'key' function returned a non hashable object of type "
"'%s' when receiving '%s'. Make sure this function always "
"returns a hashable object. Hint: immutable primitives like"
"int, str or tuple, are hashable while dict, set and list are "
"not." % (type(fingerprint), x))
else:
raise
# TODO: test that on big iterators to check for recursion limit
def chunks(iterable, chunksize, cast=tuple):
# type: (Iterable, int, Callable) -> Iterable
"""
Yields items from an iterator in iterable chunks.
"""
it = iter(iterable)
while True:
yield cast(itertools.chain([next(it)],
itertools.islice(it, chunksize - 1)))
def window(iterable, size=2, cast=tuple):
# type: (Iterable, int, Callable) -> Iterable
"""
Yields iterms by bunch of a given size, but rolling only one item
in and out at a time when iterating.
>>> list(window([1, 2, 3]))
[(1, 2), (2, 3)]
By default, this will cast the window to a tuple before yielding it;
however, any function that will accept an iterable as its argument
is a valid target.
If you pass None as a cast value, the deque will be returned as-is,
which is more performant. However, since only one deque is used
for the entire iteration, you'll get the same reference everytime,
only the deque will contains different items. The result might not
be what you want :
>>> list(window([1, 2, 3], cast=None))
[deque([2, 3], maxlen=2), deque([2, 3], maxlen=2)]
"""
iterable = iter(iterable)
d = deque(itertools.islice(iterable, size), size)
if cast:
yield cast(d)
for x in iterable:
d.append(x)
yield cast(d)
else:
yield d
for x in iterable:
d.append(x)
yield d
def at_index(iterable, index):
# type: (Iterable[T], int) -> T
"""" Return the item at the index of this iterable or raises IndexError.
WARNING: this will consume generators.
Negative indices are allowed but be aware they will cause n items to
be held in memory, where n = abs(index)
"""
try:
if index < 0:
return deque(iterable, maxlen=abs(index)).popleft()
return next(itertools.islice(iterable, index, index + 1))
except (StopIteration, IndexError) as e:
raise_from(IndexError('Index "%d" out of range' % index), e)
# TODO: accept a default value if not value is found
def first_true(iterable, func):
# type: (Iterable[T], Callable) -> T
"""" Return the first item of the iterable for which func(item) == True.
Or raises IndexError.
WARNING: this will consume generators.
"""
try:
return next((x for x in iterable if func(x)))
except StopIteration as e:
# TODO: Find a better error message
raise_from(IndexError('No match for %s' % func), e)
def iterslice(iterable, start=0, stop=None, step=1):
# type: (Iterable[T], int, int, int) -> Iterable[T]
""" Like itertools.islice, but accept int and callables.
If `start` is a callable, start the slice after the first time
start(item) == True.
If `stop` is a callable, stop the slice after the first time
stop(item) == True.
"""
if step < 0:
raise ValueError("The step can not be negative: '%s' given" % step)
if not isinstance(start, int):
# [Callable:Callable]
if not isinstance(stop, int) and stop:
return stops_when(starts_when(iterable, start), stop)
# [Callable:int]
return starts_when(itertools.islice(iterable, None, stop, step), start)
# [int:Callable]
if not isinstance(stop, int) and stop:
return stops_when(itertools.islice(iterable, start, None, step), stop)
# [int:int]
return itertools.islice(iterable, start, stop, step)
# TODO: allow to disable auto sorting. Document how to make it behave
# like the original groupby
# TODO: allow cast to be None, which set cast to lambda x: x
@renamed_argument('key', 'keyfunc')
def groupby(iterable, keyfunc=None, reverse=False, cast=tuple):
# type: (Iterable, Callable, bool, Callable) -> Iterable
sorted_iterable = sorted(iterable, key=keyfunc, reverse=reverse)
for key, group in itertools.groupby(sorted_iterable, keyfunc):
yield key, cast(group)
# TODO: make the same things than in matrix, where the default value
# can be a callable, a non string iterable, or a value
def firsts(iterable, items=1, default=None):
# type: (Iterable[T], int, T) -> Iterable[T]
""" Lazily return the first x items from this iterable or default. """
try:
items = int(items)
except (ValueError, TypeError):
raise ValueError("items should be usable as an int but is currently "
"'{}' of type '{}'".format(items, type(items)))
# TODO: replace this so that it returns lasts()
if items < 0:
raise ValueError(ww.f("items is {items} but should "
"be greater than 0. If you wish to get the last "
"items, use the lasts() function."))
i = 0
for i, item in zip(range(items), iterable):
yield item
for x in range(items - (i + 1)):
yield default
def lasts(iterable, items=1, default=None):
# type: (Iterable[T], int, T) -> Iterable[T]
""" Lazily return the last x items from this iterable or default. """
last_items = deque(iterable, maxlen=items)
for _ in range(items - len(last_items)):
yield default
for y in last_items:
yield y
# reduce is technically the last value of accumulate
# use ww.utils.EMPTY instead of EMPTY
# Put in the doc than scan=fold=accumulare and reduce=accumulate
# replace https://docs.python.org/3/library/itertools.html#itertools.accumulate
# that works only on Python 3.3 and doesn't have echo_start
# def accumulate(func, iterable, start=ww.utils.EMPTY, *, echo_start=True):
# """
# Scan higher-order function.
# The first 3 positional arguments are alike to the ``functools.reduce``
# signature. This function accepts an extra optional ``echo_start``
# parameter that controls whether the first value should be in the output.
# """
# it = iter(iterable)
# if start is ww.utils._EMPTY:
# start = next(it)
# if echo_start:
# yield start
# for item in it:
# start = func(start, item)
# yield start
|
Tygs/ww | src/ww/tools/iterables.py | skip_duplicates | python | def skip_duplicates(iterable, key=None, fingerprints=()):
# type: (Iterable, Callable, Any) -> Iterable
fingerprints = fingerprints or set()
fingerprint = None # needed on type errors unrelated to hashing
try:
# duplicate some code to gain perf in the most common case
if key is None:
for x in iterable:
if x not in fingerprints:
yield x
fingerprints.add(x)
else:
for x in iterable:
fingerprint = key(x)
if fingerprint not in fingerprints:
yield x
fingerprints.add(fingerprint)
except TypeError:
try:
hash(fingerprint)
except TypeError:
raise TypeError(
"The 'key' function returned a non hashable object of type "
"'%s' when receiving '%s'. Make sure this function always "
"returns a hashable object. Hint: immutable primitives like"
"int, str or tuple, are hashable while dict, set and list are "
"not." % (type(fingerprint), x))
else:
raise | Returns a generator that will yield all objects from iterable, skipping
duplicates.
Duplicates are identified using the `key` function to calculate a
unique fingerprint. This does not use natural equality, but the
result use a set() to remove duplicates, so defining __eq__
on your objects would have no effect.
By default the fingerprint is the object itself,
which ensure the functions works as-is with an iterable of primitives
such as int, str or tuple.
:Example:
>>> list(skip_duplicates([1, 2, 3, 4, 4, 2, 1, 3 , 4]))
[1, 2, 3, 4]
The return value of `key` MUST be hashable, which means for
non hashable objects such as dict, set or list, you need to specify
a a function that returns a hashable fingerprint.
:Example:
>>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
... lambda x: tuple(x)))
[[], [1, 2]]
>>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
... lambda x: (type(x), tuple(x))))
[[], (), [1, 2], (1, 2)]
For more complex types, such as custom classes, the default behavior
is to remove nothing. You MUST provide a `key` function is you wish
to filter those.
:Example:
>>> class Test(object):
... def __init__(self, foo='bar'):
... self.foo = foo
... def __repr__(self):
... return "Test('%s')" % self.foo
...
>>> list(skip_duplicates([Test(), Test(), Test('other')]))
[Test('bar'), Test('bar'), Test('other')]
>>> list(skip_duplicates([Test(), Test(), Test('other')],\
lambda x: x.foo))
[Test('bar'), Test('other')] | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/tools/iterables.py#L99-L179 | [
"def skip_duplicates(self, key=lambda x: x, fingerprints=None):\n"
] | # coding: utf-8
"""
:doc:`g() </iterable_wrapper>` is very convenient, but it's only a
thin wrapper on top of the tools from this module.
So if you want to apply some of the goodies from it without having to
turn your iterables into IterableWrapper objects, you can use the functions
from this module directly.
Example:
>>> from ww.tools.iterables import chunks # same as g().chunks()
>>> list(chunks(range(10), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]
You'll find below the detailed documentation for each function. Remember
they all take an iterable as input, and most often output a generator.
Go have a look, there is some great stuff here!
"""
from __future__ import division, absolute_import, print_function
import itertools
from future.utils import raise_from
import ww
from ww.types import Union, Callable, Iterable, Any, T # noqa
from ww.utils import renamed_argument
from collections import deque
# TODO: implement all https://docs.python.org/3/library/itertools.html
# which means backports and receipes
# TODO: cycle, but accept a max repeat
# TODO: filter() but:
# if the first element is an iterable, lambda x: x in first_element
# if it is a non callable scalar,
# lambda x: x == first_element
# a 3rd param to take an Exception or a list of exceptions to ignore so
# you can filter out items raising exceptions
# TODO: map, but with a 3rd param to take an Exception or a list of
# exceptions to ignore so you can filter out items raising exceptions
def starts_when(iterable, condition):
    # type: (Iterable, Union[Callable, Any]) -> Iterable
    """Skip items until a condition is met, then yield everything after.

    Args:
        iterable: the iterable to filter.
        condition: a callable tested against each item; yielding begins
            with the first item for which it returns True. A non-callable
            value is treated as ``lambda item: item == condition``.

    Example:

        >>> list(starts_when(range(10), lambda x: x > 5))
        [6, 7, 8, 9]
        >>> list(starts_when(range(10), 7))
        [7, 8, 9]
    """
    if callable(condition):
        predicate = condition
    else:
        # Wrap a plain value into an equality test.
        target = condition

        def predicate(item):
            return item == target

    return itertools.dropwhile(lambda item: not predicate(item), iterable)
def stops_when(iterable, condition):
    # type: (Iterable, Union[Callable, Any]) -> Iterable
    """Yield items until a condition is met, then stop.

    Args:
        iterable: the iterable to filter.
        condition: a callable tested against each item; yielding stops
            before the first item for which it returns True. A
            non-callable value is treated as
            ``lambda item: item == condition``.

    Example:

        >>> list(stops_when(range(10), lambda x: x > 5))
        [0, 1, 2, 3, 4, 5]
        >>> list(stops_when(range(10), 7))
        [0, 1, 2, 3, 4, 5, 6]
    """
    if callable(condition):
        predicate = condition
    else:
        # Wrap a plain value into an equality test.
        target = condition

        def predicate(item):
            return item == target

    return itertools.takewhile(lambda item: not predicate(item), iterable)
# TODO: test that on big iterators to check for recursion limit
def chunks(iterable, chunksize, cast=tuple):
    # type: (Iterable, int, Callable) -> Iterable
    """
    Yields items from an iterator in iterable chunks.

    Args:
        iterable: the source of items.
        chunksize: maximum number of items per chunk; the last chunk may
            be shorter.
        cast: callable applied to each chunk before yielding (tuple by
            default). It must consume the iterable it receives.

    Example:

        >>> list(chunks(range(7), 3))
        [(0, 1, 2), (3, 4, 5), (6,)]
    """
    it = iter(iterable)
    while True:
        # Pull the first item of each chunk by hand to detect exhaustion.
        # Under PEP 479 (Python 3.7+) letting next() raise StopIteration
        # inside a generator turns into RuntimeError, so we must catch it
        # and return explicitly instead of relying on the old implicit stop.
        try:
            head = next(it)
        except StopIteration:
            return
        yield cast(itertools.chain([head],
                                   itertools.islice(it, chunksize - 1)))
def window(iterable, size=2, cast=tuple):
    # type: (Iterable, int, Callable) -> Iterable
    """
    Yields items by bunches of a given size, but rolling only one item
    in and out at a time when iterating.

        >>> list(window([1, 2, 3]))
        [(1, 2), (2, 3)]

    By default each window is cast to a tuple before being yielded;
    however, any function that accepts an iterable as its argument
    is a valid target.

    If you pass None as a cast value, the deque will be returned as-is,
    which is more performant. However, since only one deque is used
    for the entire iteration, you'll get the same reference everytime,
    only the deque will contains different items. The result might not
    be what you want :

        >>> list(window([1, 2, 3], cast=None))
        [deque([2, 3], maxlen=2), deque([2, 3], maxlen=2)]
    """
    items = iter(iterable)
    # Pre-fill the rolling buffer with the first `size` items.
    buffer = deque(itertools.islice(items, size), size)
    if not cast:
        # Yield the deque itself: fast, but the same object every time.
        yield buffer
        for item in items:
            buffer.append(item)
            yield buffer
    else:
        yield cast(buffer)
        for item in items:
            buffer.append(item)
            yield cast(buffer)
def at_index(iterable, index):
    # type: (Iterable[T], int) -> T
    """Return the item at `index` of this iterable or raise IndexError.

    WARNING: this will consume generators.

    Negative indices are allowed, but be aware they will cause n items
    to be held in memory, where n = abs(index).
    """
    try:
        if index >= 0:
            # Skip straight to the requested position.
            return next(itertools.islice(iterable, index, index + 1))
        # Keep only the trailing abs(index) items, then take the first.
        return deque(iterable, maxlen=abs(index)).popleft()
    except (StopIteration, IndexError) as e:
        raise_from(IndexError('Index "%d" out of range' % index), e)
# TODO: accept a default value if not value is found
def first_true(iterable, func):
    # type: (Iterable[T], Callable) -> T
    """Return the first item of the iterable for which func(item) == True.

    Raises IndexError if no item matches.

    WARNING: this will consume generators.
    """
    matches = (item for item in iterable if func(item))
    try:
        return next(matches)
    except StopIteration as err:
        # TODO: Find a better error message
        raise_from(IndexError('No match for %s' % func), err)
def iterslice(iterable, start=0, stop=None, step=1):
    # type: (Iterable[T], int, int, int) -> Iterable[T]
    """ Like itertools.islice, but accept int and callables.

    If `start` is a callable, start the slice after the first time
    start(item) == True.

    If `stop` is a callable, stop the slice after the first time
    stop(item) == True.

    NOTE(review): when both `start` and `stop` are callables, `step` is
    silently ignored by the first branch below -- confirm whether that
    is intended before relying on it.
    """
    if step < 0:
        raise ValueError("The step can not be negative: '%s' given" % step)

    if not isinstance(start, int):

        # [Callable:Callable]: boundaries are detected purely by the
        # predicate helpers; no numeric slicing happens here.
        if not isinstance(stop, int) and stop:
            return stops_when(starts_when(iterable, start), stop)

        # [Callable:int]: apply the numeric stop/step first, then drop
        # leading items until the start predicate matches.
        return starts_when(itertools.islice(iterable, None, stop, step), start)

    # [int:Callable]: slice from the numeric start, then cut off at the
    # stop predicate.
    if not isinstance(stop, int) and stop:
        return stops_when(itertools.islice(iterable, start, None, step), stop)

    # [int:int]: plain islice handles everything.
    return itertools.islice(iterable, start, stop, step)
# TODO: allow to disable auto sorting. Document how to make it behave
# like the original groupby
# TODO: allow cast to be None, which set cast to lambda x: x
# NOTE(review): renamed_argument (from ww.utils) presumably maps the
# legacy `key` keyword onto `keyfunc` for backward compatibility --
# verify against its definition.
@renamed_argument('key', 'keyfunc')
def groupby(iterable, keyfunc=None, reverse=False, cast=tuple):
    # type: (Iterable, Callable, bool, Callable) -> Iterable
    """Sort the iterable by `keyfunc`, then yield (key, group) pairs.

    Unlike itertools.groupby, the input is sorted first (with the same
    key function), so each distinct key appears exactly once.

    Args:
        iterable: the items to group.
        keyfunc: key function used both to sort and to group; None
            groups items by their own value.
        reverse: passed to sorted() to reverse the sort order.
        cast: callable applied to each group before yielding (tuple by
            default), since the raw itertools group is a one-shot
            iterator tied to the underlying iteration.
    """
    sorted_iterable = sorted(iterable, key=keyfunc, reverse=reverse)
    for key, group in itertools.groupby(sorted_iterable, keyfunc):
        yield key, cast(group)
# TODO: make the same things than in matrix, where the default value
# can be a callable, a non string iterable, or a value
def firsts(iterable, items=1, default=None):
    # type: (Iterable[T], int, T) -> Iterable[T]
    """ Lazily return the first x items from this iterable or default.

    Exactly `items` values are always yielded: when the iterable is too
    short, the remaining slots are filled with `default`.

    Args:
        iterable: source of the items.
        items: number of values to yield; must be usable as an int >= 0.
        default: filler value used when the iterable runs out.

    Raises:
        ValueError: if `items` is negative or not usable as an int.
    """
    try:
        items = int(items)
    except (ValueError, TypeError):
        raise ValueError("items should be usable as an int but is currently "
                         "'{}' of type '{}'".format(items, type(items)))

    # TODO: replace this so that it returns lasts()
    if items < 0:
        raise ValueError(ww.f("items is {items} but should "
                              "be greater than 0. If you wish to get the last "
                              "items, use the lasts() function."))

    # Count what was actually yielded instead of reusing the loop
    # variable: with an empty iterable the old `i` stayed at 0, so the
    # padding loop produced one default too few.
    yielded = 0
    for _, item in zip(range(items), iterable):
        yield item
        yielded += 1
    for _ in range(items - yielded):
        yield default
def lasts(iterable, items=1, default=None):
    # type: (Iterable[T], int, T) -> Iterable[T]
    """ Lazily return the last x items from this iterable or default.

    A maxlen-bounded deque keeps only the trailing `items` values; when
    the iterable is shorter than `items`, the leading slots are padded
    with `default`.
    """
    tail = deque(iterable, maxlen=items)
    missing = items - len(tail)
    for _ in range(missing):
        yield default
    for value in tail:
        yield value
# reduce is technically the last value of accumulate
# use ww.utils.EMPTY instead of EMPTY
# Put in the doc than scan=fold=accumulare and reduce=accumulate
# replace https://docs.python.org/3/library/itertools.html#itertools.accumulate
# that works only on Python 3.3 and doesn't have echo_start
# def accumulate(func, iterable, start=ww.utils.EMPTY, *, echo_start=True):
# """
# Scan higher-order function.
# The first 3 positional arguments are alike to the ``functools.reduce``
# signature. This function accepts an extra optional ``echo_start``
# parameter that controls whether the first value should be in the output.
# """
# it = iter(iterable)
# if start is ww.utils._EMPTY:
# start = next(it)
# if echo_start:
# yield start
# for item in it:
# start = func(start, item)
# yield start
|
Tygs/ww | src/ww/tools/iterables.py | chunks | python | def chunks(iterable, chunksize, cast=tuple):
# type: (Iterable, int, Callable) -> Iterable
it = iter(iterable)
while True:
yield cast(itertools.chain([next(it)],
itertools.islice(it, chunksize - 1))) | Yields items from an iterator in iterable chunks. | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/tools/iterables.py#L183-L191 | null | # coding: utf-8
"""
:doc:`g() </iterable_wrapper>` is very convenient, but it's only a
thin wrapper on top of the tools from this module.
So if you want to apply some of the goodies from it without having to
turn your iterables into IterableWrapper objects, you can use the functions
from this module directly.
Example:
>>> from ww.tools.iterables import chunks # same as g().chunks()
>>> list(chunks(range(10), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]
You'll find bellow the detailed documentation for each functions. Remember
they all take an iterable as input, and most often ouput a generator.
Go have a look, there is some great stuff here!
"""
from __future__ import division, absolute_import, print_function
import itertools
from future.utils import raise_from
import ww
from ww.types import Union, Callable, Iterable, Any, T # noqa
from ww.utils import renamed_argument
from collections import deque
# TODO: implement all https://docs.python.org/3/library/itertools.html
# which means backports and receipes
# TODO: cycle, but accept a max repeat
# TODO: filter() but:
# if an iterable is first element, lambda x: x in first_element
# if an iterable is a non callable scalare,
# lambda x: x == first_element
# a 3rd param to take an Exception or a list of exception to ignore so you can
# filter out stuff raisin exceptions
# TODO: map, but a 3rd param to take an Exception or a list of exception
# to ignore so you can filter out stuff raisin exceptions
def starts_when(iterable, condition):
# type: (Iterable, Union[Callable, Any]) -> Iterable
"""Start yielding items when a condition arise.
Args:
iterable: the iterable to filter.
condition: if the callable returns True once, start yielding
items. If it's not a callable, it will be converted
to one as `lambda condition: condition == item`.
Example:
>>> list(starts_when(range(10), lambda x: x > 5))
[6, 7, 8, 9]
>>> list(starts_when(range(10), 7))
[7, 8, 9]
"""
if not callable(condition):
cond_value = condition
def condition(x):
return x == cond_value
return itertools.dropwhile(lambda x: not condition(x), iterable)
def stops_when(iterable, condition):
# type: (Iterable, Union[Callable, Any]) -> Iterable
"""Stop yielding items when a condition arise.
Args:
iterable: the iterable to filter.
condition: if the callable returns True once, stop yielding
items. If it's not a callable, it will be converted
to one as `lambda condition: condition == item`.
Example:
>>> list(stops_when(range(10), lambda x: x > 5))
[0, 1, 2, 3, 4, 5]
>>> list(stops_when(range(10), 7))
[0, 1, 2, 3, 4, 5, 6]
"""
if not callable(condition):
cond_value = condition
def condition(x):
return x == cond_value
return itertools.takewhile(lambda x: not condition(x), iterable)
def skip_duplicates(iterable, key=None, fingerprints=()):
    # type: (Iterable, Callable, Any) -> Iterable
    """
    Returns a generator that will yield all objects from iterable, skipping
    duplicates.

    Duplicates are identified using the `key` function to calculate a
    unique fingerprint. This does not use natural equality, but the
    result uses a set() to remove duplicates, so defining __eq__
    on your objects would have no effect.

    By default the fingerprint is the object itself,
    which ensures the function works as-is with an iterable of primitives
    such as int, str or tuple.

    :Example:

        >>> list(skip_duplicates([1, 2, 3, 4, 4, 2, 1, 3 , 4]))
        [1, 2, 3, 4]

    The return value of `key` MUST be hashable, which means for
    non hashable objects such as dict, set or list, you need to specify
    a function that returns a hashable fingerprint.

    :Example:

        >>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
        ... lambda x: tuple(x)))
        [[], [1, 2]]
        >>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
        ... lambda x: (type(x), tuple(x))))
        [[], (), [1, 2], (1, 2)]

    For more complex types, such as custom classes, the default behavior
    is to remove nothing. You MUST provide a `key` function if you wish
    to filter those.

    :Example:

        >>> class Test(object):
        ...     def __init__(self, foo='bar'):
        ...         self.foo = foo
        ...     def __repr__(self):
        ...         return "Test('%s')" % self.foo
        ...
        >>> list(skip_duplicates([Test(), Test(), Test('other')]))
        [Test('bar'), Test('bar'), Test('other')]
        >>> list(skip_duplicates([Test(), Test(), Test('other')],\
                 lambda x: x.foo))
        [Test('bar'), Test('other')]
    """
    # A caller-supplied `fingerprints` container lets several calls share
    # one dedup set; the immutable () default is swapped for a fresh set.
    fingerprints = fingerprints or set()

    fingerprint = None  # needed on type errors unrelated to hashing

    try:
        # duplicate some code to gain perf in the most common case
        if key is None:
            for x in iterable:
                if x not in fingerprints:
                    yield x
                    fingerprints.add(x)
        else:
            for x in iterable:
                fingerprint = key(x)

                if fingerprint not in fingerprints:
                    yield x
                    fingerprints.add(fingerprint)

    except TypeError:
        # Distinguish "key() returned something unhashable" from other
        # TypeErrors: if the last fingerprint hashes fine, the error came
        # from elsewhere and is re-raised untouched.
        try:
            hash(fingerprint)
        except TypeError:
            raise TypeError(
                "The 'key' function returned a non hashable object of type "
                "'%s' when receiving '%s'. Make sure this function always "
                "returns a hashable object. Hint: immutable primitives like"
                "int, str or tuple, are hashable while dict, set and list are "
                "not." % (type(fingerprint), x))
        else:
            raise
# TODO: test that on big iterators to check for recursion limit
def window(iterable, size=2, cast=tuple):
# type: (Iterable, int, Callable) -> Iterable
"""
Yields iterms by bunch of a given size, but rolling only one item
in and out at a time when iterating.
>>> list(window([1, 2, 3]))
[(1, 2), (2, 3)]
By default, this will cast the window to a tuple before yielding it;
however, any function that will accept an iterable as its argument
is a valid target.
If you pass None as a cast value, the deque will be returned as-is,
which is more performant. However, since only one deque is used
for the entire iteration, you'll get the same reference everytime,
only the deque will contains different items. The result might not
be what you want :
>>> list(window([1, 2, 3], cast=None))
[deque([2, 3], maxlen=2), deque([2, 3], maxlen=2)]
"""
iterable = iter(iterable)
d = deque(itertools.islice(iterable, size), size)
if cast:
yield cast(d)
for x in iterable:
d.append(x)
yield cast(d)
else:
yield d
for x in iterable:
d.append(x)
yield d
def at_index(iterable, index):
# type: (Iterable[T], int) -> T
"""" Return the item at the index of this iterable or raises IndexError.
WARNING: this will consume generators.
Negative indices are allowed but be aware they will cause n items to
be held in memory, where n = abs(index)
"""
try:
if index < 0:
return deque(iterable, maxlen=abs(index)).popleft()
return next(itertools.islice(iterable, index, index + 1))
except (StopIteration, IndexError) as e:
raise_from(IndexError('Index "%d" out of range' % index), e)
# TODO: accept a default value if not value is found
def first_true(iterable, func):
# type: (Iterable[T], Callable) -> T
"""" Return the first item of the iterable for which func(item) == True.
Or raises IndexError.
WARNING: this will consume generators.
"""
try:
return next((x for x in iterable if func(x)))
except StopIteration as e:
# TODO: Find a better error message
raise_from(IndexError('No match for %s' % func), e)
def iterslice(iterable, start=0, stop=None, step=1):
# type: (Iterable[T], int, int, int) -> Iterable[T]
""" Like itertools.islice, but accept int and callables.
If `start` is a callable, start the slice after the first time
start(item) == True.
If `stop` is a callable, stop the slice after the first time
stop(item) == True.
"""
if step < 0:
raise ValueError("The step can not be negative: '%s' given" % step)
if not isinstance(start, int):
# [Callable:Callable]
if not isinstance(stop, int) and stop:
return stops_when(starts_when(iterable, start), stop)
# [Callable:int]
return starts_when(itertools.islice(iterable, None, stop, step), start)
# [int:Callable]
if not isinstance(stop, int) and stop:
return stops_when(itertools.islice(iterable, start, None, step), stop)
# [int:int]
return itertools.islice(iterable, start, stop, step)
# TODO: allow to disable auto sorting. Document how to make it behave
# like the original groupby
# TODO: allow cast to be None, which set cast to lambda x: x
@renamed_argument('key', 'keyfunc')
def groupby(iterable, keyfunc=None, reverse=False, cast=tuple):
# type: (Iterable, Callable, bool, Callable) -> Iterable
sorted_iterable = sorted(iterable, key=keyfunc, reverse=reverse)
for key, group in itertools.groupby(sorted_iterable, keyfunc):
yield key, cast(group)
# TODO: make the same things than in matrix, where the default value
# can be a callable, a non string iterable, or a value
def firsts(iterable, items=1, default=None):
# type: (Iterable[T], int, T) -> Iterable[T]
""" Lazily return the first x items from this iterable or default. """
try:
items = int(items)
except (ValueError, TypeError):
raise ValueError("items should be usable as an int but is currently "
"'{}' of type '{}'".format(items, type(items)))
# TODO: replace this so that it returns lasts()
if items < 0:
raise ValueError(ww.f("items is {items} but should "
"be greater than 0. If you wish to get the last "
"items, use the lasts() function."))
i = 0
for i, item in zip(range(items), iterable):
yield item
for x in range(items - (i + 1)):
yield default
def lasts(iterable, items=1, default=None):
# type: (Iterable[T], int, T) -> Iterable[T]
""" Lazily return the last x items from this iterable or default. """
last_items = deque(iterable, maxlen=items)
for _ in range(items - len(last_items)):
yield default
for y in last_items:
yield y
# reduce is technically the last value of accumulate
# use ww.utils.EMPTY instead of EMPTY
# Put in the doc than scan=fold=accumulare and reduce=accumulate
# replace https://docs.python.org/3/library/itertools.html#itertools.accumulate
# that works only on Python 3.3 and doesn't have echo_start
# def accumulate(func, iterable, start=ww.utils.EMPTY, *, echo_start=True):
# """
# Scan higher-order function.
# The first 3 positional arguments are alike to the ``functools.reduce``
# signature. This function accepts an extra optional ``echo_start``
# parameter that controls whether the first value should be in the output.
# """
# it = iter(iterable)
# if start is ww.utils._EMPTY:
# start = next(it)
# if echo_start:
# yield start
# for item in it:
# start = func(start, item)
# yield start
|
Tygs/ww | src/ww/tools/iterables.py | window | python | def window(iterable, size=2, cast=tuple):
# type: (Iterable, int, Callable) -> Iterable
iterable = iter(iterable)
d = deque(itertools.islice(iterable, size), size)
if cast:
yield cast(d)
for x in iterable:
d.append(x)
yield cast(d)
else:
yield d
for x in iterable:
d.append(x)
yield d | Yields iterms by bunch of a given size, but rolling only one item
in and out at a time when iterating.
>>> list(window([1, 2, 3]))
[(1, 2), (2, 3)]
By default, this will cast the window to a tuple before yielding it;
however, any function that will accept an iterable as its argument
is a valid target.
If you pass None as a cast value, the deque will be returned as-is,
which is more performant. However, since only one deque is used
for the entire iteration, you'll get the same reference everytime,
only the deque will contains different items. The result might not
be what you want :
>>> list(window([1, 2, 3], cast=None))
[deque([2, 3], maxlen=2), deque([2, 3], maxlen=2)] | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/tools/iterables.py#L194-L228 | null | # coding: utf-8
"""
:doc:`g() </iterable_wrapper>` is very convenient, but it's only a
thin wrapper on top of the tools from this module.
So if you want to apply some of the goodies from it without having to
turn your iterables into IterableWrapper objects, you can use the functions
from this module directly.
Example:
>>> from ww.tools.iterables import chunks # same as g().chunks()
>>> list(chunks(range(10), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]
You'll find bellow the detailed documentation for each functions. Remember
they all take an iterable as input, and most often ouput a generator.
Go have a look, there is some great stuff here!
"""
from __future__ import division, absolute_import, print_function
import itertools
from future.utils import raise_from
import ww
from ww.types import Union, Callable, Iterable, Any, T # noqa
from ww.utils import renamed_argument
from collections import deque
# TODO: implement all https://docs.python.org/3/library/itertools.html
# which means backports and receipes
# TODO: cycle, but accept a max repeat
# TODO: filter() but:
# if an iterable is first element, lambda x: x in first_element
# if an iterable is a non callable scalare,
# lambda x: x == first_element
# a 3rd param to take an Exception or a list of exception to ignore so you can
# filter out stuff raisin exceptions
# TODO: map, but a 3rd param to take an Exception or a list of exception
# to ignore so you can filter out stuff raisin exceptions
def starts_when(iterable, condition):
# type: (Iterable, Union[Callable, Any]) -> Iterable
"""Start yielding items when a condition arise.
Args:
iterable: the iterable to filter.
condition: if the callable returns True once, start yielding
items. If it's not a callable, it will be converted
to one as `lambda condition: condition == item`.
Example:
>>> list(starts_when(range(10), lambda x: x > 5))
[6, 7, 8, 9]
>>> list(starts_when(range(10), 7))
[7, 8, 9]
"""
if not callable(condition):
cond_value = condition
def condition(x):
return x == cond_value
return itertools.dropwhile(lambda x: not condition(x), iterable)
def stops_when(iterable, condition):
# type: (Iterable, Union[Callable, Any]) -> Iterable
"""Stop yielding items when a condition arise.
Args:
iterable: the iterable to filter.
condition: if the callable returns True once, stop yielding
items. If it's not a callable, it will be converted
to one as `lambda condition: condition == item`.
Example:
>>> list(stops_when(range(10), lambda x: x > 5))
[0, 1, 2, 3, 4, 5]
>>> list(stops_when(range(10), 7))
[0, 1, 2, 3, 4, 5, 6]
"""
if not callable(condition):
cond_value = condition
def condition(x):
return x == cond_value
return itertools.takewhile(lambda x: not condition(x), iterable)
def skip_duplicates(iterable, key=None, fingerprints=()):
# type: (Iterable, Callable, Any) -> Iterable
"""
Returns a generator that will yield all objects from iterable, skipping
duplicates.
Duplicates are identified using the `key` function to calculate a
unique fingerprint. This does not use natural equality, but the
result use a set() to remove duplicates, so defining __eq__
on your objects would have no effect.
By default the fingerprint is the object itself,
which ensure the functions works as-is with an iterable of primitives
such as int, str or tuple.
:Example:
>>> list(skip_duplicates([1, 2, 3, 4, 4, 2, 1, 3 , 4]))
[1, 2, 3, 4]
The return value of `key` MUST be hashable, which means for
non hashable objects such as dict, set or list, you need to specify
a a function that returns a hashable fingerprint.
:Example:
>>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
... lambda x: tuple(x)))
[[], [1, 2]]
>>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
... lambda x: (type(x), tuple(x))))
[[], (), [1, 2], (1, 2)]
For more complex types, such as custom classes, the default behavior
is to remove nothing. You MUST provide a `key` function is you wish
to filter those.
:Example:
>>> class Test(object):
... def __init__(self, foo='bar'):
... self.foo = foo
... def __repr__(self):
... return "Test('%s')" % self.foo
...
>>> list(skip_duplicates([Test(), Test(), Test('other')]))
[Test('bar'), Test('bar'), Test('other')]
>>> list(skip_duplicates([Test(), Test(), Test('other')],\
lambda x: x.foo))
[Test('bar'), Test('other')]
"""
fingerprints = fingerprints or set()
fingerprint = None # needed on type errors unrelated to hashing
try:
# duplicate some code to gain perf in the most common case
if key is None:
for x in iterable:
if x not in fingerprints:
yield x
fingerprints.add(x)
else:
for x in iterable:
fingerprint = key(x)
if fingerprint not in fingerprints:
yield x
fingerprints.add(fingerprint)
except TypeError:
try:
hash(fingerprint)
except TypeError:
raise TypeError(
"The 'key' function returned a non hashable object of type "
"'%s' when receiving '%s'. Make sure this function always "
"returns a hashable object. Hint: immutable primitives like"
"int, str or tuple, are hashable while dict, set and list are "
"not." % (type(fingerprint), x))
else:
raise
# TODO: test that on big iterators to check for recursion limit
def chunks(iterable, chunksize, cast=tuple):
# type: (Iterable, int, Callable) -> Iterable
"""
Yields items from an iterator in iterable chunks.
"""
it = iter(iterable)
while True:
yield cast(itertools.chain([next(it)],
itertools.islice(it, chunksize - 1)))
def at_index(iterable, index):
# type: (Iterable[T], int) -> T
"""" Return the item at the index of this iterable or raises IndexError.
WARNING: this will consume generators.
Negative indices are allowed but be aware they will cause n items to
be held in memory, where n = abs(index)
"""
try:
if index < 0:
return deque(iterable, maxlen=abs(index)).popleft()
return next(itertools.islice(iterable, index, index + 1))
except (StopIteration, IndexError) as e:
raise_from(IndexError('Index "%d" out of range' % index), e)
# TODO: accept a default value if not value is found
def first_true(iterable, func):
# type: (Iterable[T], Callable) -> T
"""" Return the first item of the iterable for which func(item) == True.
Or raises IndexError.
WARNING: this will consume generators.
"""
try:
return next((x for x in iterable if func(x)))
except StopIteration as e:
# TODO: Find a better error message
raise_from(IndexError('No match for %s' % func), e)
def iterslice(iterable, start=0, stop=None, step=1):
# type: (Iterable[T], int, int, int) -> Iterable[T]
""" Like itertools.islice, but accept int and callables.
If `start` is a callable, start the slice after the first time
start(item) == True.
If `stop` is a callable, stop the slice after the first time
stop(item) == True.
"""
if step < 0:
raise ValueError("The step can not be negative: '%s' given" % step)
if not isinstance(start, int):
# [Callable:Callable]
if not isinstance(stop, int) and stop:
return stops_when(starts_when(iterable, start), stop)
# [Callable:int]
return starts_when(itertools.islice(iterable, None, stop, step), start)
# [int:Callable]
if not isinstance(stop, int) and stop:
return stops_when(itertools.islice(iterable, start, None, step), stop)
# [int:int]
return itertools.islice(iterable, start, stop, step)
# TODO: allow to disable auto sorting. Document how to make it behave
# like the original groupby
# TODO: allow cast to be None, which set cast to lambda x: x
@renamed_argument('key', 'keyfunc')
def groupby(iterable, keyfunc=None, reverse=False, cast=tuple):
# type: (Iterable, Callable, bool, Callable) -> Iterable
sorted_iterable = sorted(iterable, key=keyfunc, reverse=reverse)
for key, group in itertools.groupby(sorted_iterable, keyfunc):
yield key, cast(group)
# TODO: make the same things than in matrix, where the default value
# can be a callable, a non string iterable, or a value
def firsts(iterable, items=1, default=None):
# type: (Iterable[T], int, T) -> Iterable[T]
""" Lazily return the first x items from this iterable or default. """
try:
items = int(items)
except (ValueError, TypeError):
raise ValueError("items should be usable as an int but is currently "
"'{}' of type '{}'".format(items, type(items)))
# TODO: replace this so that it returns lasts()
if items < 0:
raise ValueError(ww.f("items is {items} but should "
"be greater than 0. If you wish to get the last "
"items, use the lasts() function."))
i = 0
for i, item in zip(range(items), iterable):
yield item
for x in range(items - (i + 1)):
yield default
def lasts(iterable, items=1, default=None):
# type: (Iterable[T], int, T) -> Iterable[T]
""" Lazily return the last x items from this iterable or default. """
last_items = deque(iterable, maxlen=items)
for _ in range(items - len(last_items)):
yield default
for y in last_items:
yield y
# reduce is technically the last value of accumulate
# use ww.utils.EMPTY instead of EMPTY
# Put in the doc than scan=fold=accumulare and reduce=accumulate
# replace https://docs.python.org/3/library/itertools.html#itertools.accumulate
# that works only on Python 3.3 and doesn't have echo_start
# def accumulate(func, iterable, start=ww.utils.EMPTY, *, echo_start=True):
# """
# Scan higher-order function.
# The first 3 positional arguments are alike to the ``functools.reduce``
# signature. This function accepts an extra optional ``echo_start``
# parameter that controls whether the first value should be in the output.
# """
# it = iter(iterable)
# if start is ww.utils._EMPTY:
# start = next(it)
# if echo_start:
# yield start
# for item in it:
# start = func(start, item)
# yield start
|
Tygs/ww | src/ww/tools/iterables.py | at_index | python | def at_index(iterable, index):
# type: (Iterable[T], int) -> T
"
try:
if index < 0:
return deque(iterable, maxlen=abs(index)).popleft()
return next(itertools.islice(iterable, index, index + 1))
except (StopIteration, IndexError) as e:
raise_from(IndexError('Index "%d" out of range' % index), e) | Return the item at the index of this iterable or raises IndexError.
WARNING: this will consume generators.
Negative indices are allowed but be aware they will cause n items to
be held in memory, where n = abs(index) | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/tools/iterables.py#L231-L246 | null | # coding: utf-8
"""
:doc:`g() </iterable_wrapper>` is very convenient, but it's only a
thin wrapper on top of the tools from this module.
So if you want to apply some of the goodies from it without having to
turn your iterables into IterableWrapper objects, you can use the functions
from this module directly.
Example:
>>> from ww.tools.iterables import chunks # same as g().chunks()
>>> list(chunks(range(10), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]
You'll find bellow the detailed documentation for each functions. Remember
they all take an iterable as input, and most often ouput a generator.
Go have a look, there is some great stuff here!
"""
from __future__ import division, absolute_import, print_function
import itertools
from future.utils import raise_from
import ww
from ww.types import Union, Callable, Iterable, Any, T # noqa
from ww.utils import renamed_argument
from collections import deque
# TODO: implement all https://docs.python.org/3/library/itertools.html
# which means backports and receipes
# TODO: cycle, but accept a max repeat
# TODO: filter() but:
# if an iterable is first element, lambda x: x in first_element
# if an iterable is a non callable scalare,
# lambda x: x == first_element
# a 3rd param to take an Exception or a list of exception to ignore so you can
# filter out stuff raisin exceptions
# TODO: map, but a 3rd param to take an Exception or a list of exception
# to ignore so you can filter out stuff raisin exceptions
def starts_when(iterable, condition):
# type: (Iterable, Union[Callable, Any]) -> Iterable
"""Start yielding items when a condition arise.
Args:
iterable: the iterable to filter.
condition: if the callable returns True once, start yielding
items. If it's not a callable, it will be converted
to one as `lambda condition: condition == item`.
Example:
>>> list(starts_when(range(10), lambda x: x > 5))
[6, 7, 8, 9]
>>> list(starts_when(range(10), 7))
[7, 8, 9]
"""
if not callable(condition):
cond_value = condition
def condition(x):
return x == cond_value
return itertools.dropwhile(lambda x: not condition(x), iterable)
def stops_when(iterable, condition):
# type: (Iterable, Union[Callable, Any]) -> Iterable
"""Stop yielding items when a condition arise.
Args:
iterable: the iterable to filter.
condition: if the callable returns True once, stop yielding
items. If it's not a callable, it will be converted
to one as `lambda condition: condition == item`.
Example:
>>> list(stops_when(range(10), lambda x: x > 5))
[0, 1, 2, 3, 4, 5]
>>> list(stops_when(range(10), 7))
[0, 1, 2, 3, 4, 5, 6]
"""
if not callable(condition):
cond_value = condition
def condition(x):
return x == cond_value
return itertools.takewhile(lambda x: not condition(x), iterable)
def skip_duplicates(iterable, key=None, fingerprints=()):
# type: (Iterable, Callable, Any) -> Iterable
"""
Returns a generator that will yield all objects from iterable, skipping
duplicates.
Duplicates are identified using the `key` function to calculate a
unique fingerprint. This does not use natural equality, but the
result use a set() to remove duplicates, so defining __eq__
on your objects would have no effect.
By default the fingerprint is the object itself,
which ensure the functions works as-is with an iterable of primitives
such as int, str or tuple.
:Example:
>>> list(skip_duplicates([1, 2, 3, 4, 4, 2, 1, 3 , 4]))
[1, 2, 3, 4]
The return value of `key` MUST be hashable, which means for
non hashable objects such as dict, set or list, you need to specify
a function that returns a hashable fingerprint.
:Example:
>>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
... lambda x: tuple(x)))
[[], [1, 2]]
>>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
... lambda x: (type(x), tuple(x))))
[[], (), [1, 2], (1, 2)]
For more complex types, such as custom classes, the default behavior
is to remove nothing. You MUST provide a `key` function is you wish
to filter those.
:Example:
>>> class Test(object):
... def __init__(self, foo='bar'):
... self.foo = foo
... def __repr__(self):
... return "Test('%s')" % self.foo
...
>>> list(skip_duplicates([Test(), Test(), Test('other')]))
[Test('bar'), Test('bar'), Test('other')]
>>> list(skip_duplicates([Test(), Test(), Test('other')],\
lambda x: x.foo))
[Test('bar'), Test('other')]
"""
fingerprints = fingerprints or set()
fingerprint = None # needed on type errors unrelated to hashing
try:
# duplicate some code to gain perf in the most common case
if key is None:
for x in iterable:
if x not in fingerprints:
yield x
fingerprints.add(x)
else:
for x in iterable:
fingerprint = key(x)
if fingerprint not in fingerprints:
yield x
fingerprints.add(fingerprint)
except TypeError:
try:
hash(fingerprint)
except TypeError:
raise TypeError(
"The 'key' function returned a non hashable object of type "
"'%s' when receiving '%s'. Make sure this function always "
"returns a hashable object. Hint: immutable primitives like"
"int, str or tuple, are hashable while dict, set and list are "
"not." % (type(fingerprint), x))
else:
raise
# TODO: test that on big iterators to check for recursion limit
def chunks(iterable, chunksize, cast=tuple):
# type: (Iterable, int, Callable) -> Iterable
"""
Yields items from an iterator in iterable chunks.
"""
it = iter(iterable)
while True:
yield cast(itertools.chain([next(it)],
itertools.islice(it, chunksize - 1)))
def window(iterable, size=2, cast=tuple):
# type: (Iterable, int, Callable) -> Iterable
"""
Yields items in bunches of a given size, but rolling only one item
in and out at a time when iterating.
>>> list(window([1, 2, 3]))
[(1, 2), (2, 3)]
By default, this will cast the window to a tuple before yielding it;
however, any function that will accept an iterable as its argument
is a valid target.
If you pass None as a cast value, the deque will be returned as-is,
which is more performant. However, since only one deque is used
for the entire iteration, you'll get the same reference everytime,
only the deque will contains different items. The result might not
be what you want :
>>> list(window([1, 2, 3], cast=None))
[deque([2, 3], maxlen=2), deque([2, 3], maxlen=2)]
"""
iterable = iter(iterable)
d = deque(itertools.islice(iterable, size), size)
if cast:
yield cast(d)
for x in iterable:
d.append(x)
yield cast(d)
else:
yield d
for x in iterable:
d.append(x)
yield d
# TODO: accept a default value if not value is found
def first_true(iterable, func):
# type: (Iterable[T], Callable) -> T
"""" Return the first item of the iterable for which func(item) == True.
Or raises IndexError.
WARNING: this will consume generators.
"""
try:
return next((x for x in iterable if func(x)))
except StopIteration as e:
# TODO: Find a better error message
raise_from(IndexError('No match for %s' % func), e)
def iterslice(iterable, start=0, stop=None, step=1):
# type: (Iterable[T], int, int, int) -> Iterable[T]
""" Like itertools.islice, but accept int and callables.
If `start` is a callable, start the slice after the first time
start(item) == True.
If `stop` is a callable, stop the slice after the first time
stop(item) == True.
"""
if step < 0:
raise ValueError("The step can not be negative: '%s' given" % step)
if not isinstance(start, int):
# [Callable:Callable]
if not isinstance(stop, int) and stop:
return stops_when(starts_when(iterable, start), stop)
# [Callable:int]
return starts_when(itertools.islice(iterable, None, stop, step), start)
# [int:Callable]
if not isinstance(stop, int) and stop:
return stops_when(itertools.islice(iterable, start, None, step), stop)
# [int:int]
return itertools.islice(iterable, start, stop, step)
# TODO: allow to disable auto sorting. Document how to make it behave
# like the original groupby
# TODO: allow cast to be None, which set cast to lambda x: x
@renamed_argument('key', 'keyfunc')
def groupby(iterable, keyfunc=None, reverse=False, cast=tuple):
# type: (Iterable, Callable, bool, Callable) -> Iterable
sorted_iterable = sorted(iterable, key=keyfunc, reverse=reverse)
for key, group in itertools.groupby(sorted_iterable, keyfunc):
yield key, cast(group)
# TODO: make the same things than in matrix, where the default value
# can be a callable, a non string iterable, or a value
def firsts(iterable, items=1, default=None):
# type: (Iterable[T], int, T) -> Iterable[T]
""" Lazily return the first x items from this iterable or default. """
try:
items = int(items)
except (ValueError, TypeError):
raise ValueError("items should be usable as an int but is currently "
"'{}' of type '{}'".format(items, type(items)))
# TODO: replace this so that it returns lasts()
if items < 0:
raise ValueError(ww.f("items is {items} but should "
"be greater than 0. If you wish to get the last "
"items, use the lasts() function."))
i = 0
for i, item in zip(range(items), iterable):
yield item
for x in range(items - (i + 1)):
yield default
def lasts(iterable, items=1, default=None):
# type: (Iterable[T], int, T) -> Iterable[T]
""" Lazily return the last x items from this iterable or default. """
last_items = deque(iterable, maxlen=items)
for _ in range(items - len(last_items)):
yield default
for y in last_items:
yield y
# reduce is technically the last value of accumulate
# use ww.utils.EMPTY instead of EMPTY
# Put in the doc than scan=fold=accumulare and reduce=accumulate
# replace https://docs.python.org/3/library/itertools.html#itertools.accumulate
# that works only on Python 3.3 and doesn't have echo_start
# def accumulate(func, iterable, start=ww.utils.EMPTY, *, echo_start=True):
# """
# Scan higher-order function.
# The first 3 positional arguments are alike to the ``functools.reduce``
# signature. This function accepts an extra optional ``echo_start``
# parameter that controls whether the first value should be in the output.
# """
# it = iter(iterable)
# if start is ww.utils._EMPTY:
# start = next(it)
# if echo_start:
# yield start
# for item in it:
# start = func(start, item)
# yield start
|
Tygs/ww | src/ww/tools/iterables.py | first_true | python | def first_true(iterable, func):
# type: (Iterable[T], Callable) -> T
"
try:
return next((x for x in iterable if func(x)))
except StopIteration as e:
# TODO: Find a better error message
raise_from(IndexError('No match for %s' % func), e) | Return the first item of the iterable for which func(item) == True.
Or raises IndexError.
WARNING: this will consume generators. | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/tools/iterables.py#L250-L262 | null | # coding: utf-8
"""
:doc:`g() </iterable_wrapper>` is very convenient, but it's only a
thin wrapper on top of the tools from this module.
So if you want to apply some of the goodies from it without having to
turn your iterables into IterableWrapper objects, you can use the functions
from this module directly.
Example:
>>> from ww.tools.iterables import chunks # same as g().chunks()
>>> list(chunks(range(10), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]
You'll find bellow the detailed documentation for each functions. Remember
they all take an iterable as input, and most often ouput a generator.
Go have a look, there is some great stuff here!
"""
from __future__ import division, absolute_import, print_function
import itertools
from future.utils import raise_from
import ww
from ww.types import Union, Callable, Iterable, Any, T # noqa
from ww.utils import renamed_argument
from collections import deque
# TODO: implement all https://docs.python.org/3/library/itertools.html
# which means backports and receipes
# TODO: cycle, but accept a max repeat
# TODO: filter() but:
# if an iterable is first element, lambda x: x in first_element
# if an iterable is a non callable scalare,
# lambda x: x == first_element
# a 3rd param to take an Exception or a list of exception to ignore so you can
# filter out stuff raisin exceptions
# TODO: map, but a 3rd param to take an Exception or a list of exception
# to ignore so you can filter out stuff raisin exceptions
def starts_when(iterable, condition):
    # type: (Iterable, Union[Callable, Any]) -> Iterable
    """Skip items until a condition is first met, then yield everything.

    Args:
        iterable: the iterable to filter.
        condition: a callable, or a plain value. A plain value is treated
            as the predicate ``lambda item: item == condition``. Items are
            dropped until the predicate returns True once; from that point
            on, every remaining item is yielded.

    Example:

        >>> list(starts_when(range(10), lambda x: x > 5))
        [6, 7, 8, 9]
        >>> list(starts_when(range(10), 7))
        [7, 8, 9]
    """
    if callable(condition):
        predicate = condition
    else:
        target = condition

        def predicate(item):
            return item == target

    return itertools.dropwhile(lambda item: not predicate(item), iterable)
def stops_when(iterable, condition):
    # type: (Iterable, Union[Callable, Any]) -> Iterable
    """Yield items until a condition is first met, then stop.

    Args:
        iterable: the iterable to filter.
        condition: a callable, or a plain value. A plain value is treated
            as the predicate ``lambda item: item == condition``. Items are
            yielded while the predicate is False; the first item for which
            it returns True is NOT yielded, and iteration ends there.

    Example:

        >>> list(stops_when(range(10), lambda x: x > 5))
        [0, 1, 2, 3, 4, 5]
        >>> list(stops_when(range(10), 7))
        [0, 1, 2, 3, 4, 5, 6]
    """
    if callable(condition):
        predicate = condition
    else:
        target = condition

        def predicate(item):
            return item == target

    return itertools.takewhile(lambda item: not predicate(item), iterable)
def skip_duplicates(iterable, key=None, fingerprints=()):
    # type: (Iterable, Callable, Any) -> Iterable
    """
    Returns a generator that will yield all objects from iterable, skipping
    duplicates.

    Duplicates are identified using the `key` function to calculate a
    unique fingerprint. This does not use natural equality, but the
    result uses a set() to remove duplicates, so defining __eq__
    on your objects would have no effect.

    By default the fingerprint is the object itself,
    which ensures the function works as-is with an iterable of primitives
    such as int, str or tuple.

    :Example:

        >>> list(skip_duplicates([1, 2, 3, 4, 4, 2, 1, 3 , 4]))
        [1, 2, 3, 4]

    The return value of `key` MUST be hashable, which means for
    non hashable objects such as dict, set or list, you need to specify
    a function that returns a hashable fingerprint.

    :Example:

        >>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
        ... lambda x: tuple(x)))
        [[], [1, 2]]
        >>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
        ... lambda x: (type(x), tuple(x))))
        [[], (), [1, 2], (1, 2)]

    For more complex types, such as custom classes, the default behavior
    is to remove nothing. You MUST provide a `key` function if you wish
    to filter those.

    :Example:

        >>> class Test(object):
        ...    def __init__(self, foo='bar'):
        ...        self.foo = foo
        ...    def __repr__(self):
        ...        return "Test('%s')" % self.foo
        ...
        >>> list(skip_duplicates([Test(), Test(), Test('other')]))
        [Test('bar'), Test('bar'), Test('other')]
        >>> list(skip_duplicates([Test(), Test(), Test('other')],\
                                 lambda x: x.foo))
        [Test('bar'), Test('other')]
    """
    # A caller-provided set lets several calls share the same seen-set;
    # the default () is falsy, so a fresh set is created per call.
    fingerprints = fingerprints or set()

    fingerprint = None  # needed on type errors unrelated to hashing
    try:
        # duplicate some code to gain perf in the most common case
        if key is None:
            for x in iterable:
                if x not in fingerprints:
                    yield x
                    fingerprints.add(x)
        else:
            for x in iterable:
                fingerprint = key(x)
                if fingerprint not in fingerprints:
                    yield x
                    fingerprints.add(fingerprint)
    except TypeError:
        # Distinguish "the fingerprint is unhashable" (actionable by the
        # caller: pass a better `key`) from any other TypeError raised by
        # the iteration or by key() itself, which is re-raised untouched.
        try:
            hash(fingerprint)
        except TypeError:
            raise TypeError(
                "The 'key' function returned a non hashable object of type "
                "'%s' when receiving '%s'. Make sure this function always "
                "returns a hashable object. Hint: immutable primitives like"
                "int, str or tuple, are hashable while dict, set and list are "
                "not." % (type(fingerprint), x))
        else:
            raise
# TODO: test that on big iterators to check for recursion limit
def chunks(iterable, chunksize, cast=tuple):
    # type: (Iterable, int, Callable) -> Iterable
    """
    Yields items from an iterator in iterable chunks.

    Each chunk holds at most `chunksize` items; the last one may be
    shorter. `cast` is applied to every chunk before it is yielded, so
    the default (tuple) materializes each chunk eagerly.

    Example:

        >>> list(chunks(range(10), 3))
        [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]
    """
    it = iter(iterable)
    while True:
        # PEP 479 (default since Python 3.7): a StopIteration escaping a
        # generator body is turned into a RuntimeError, so the bare
        # next(it) the original relied on to end the loop must be caught
        # explicitly and converted into a clean return.
        try:
            head = next(it)
        except StopIteration:
            return
        yield cast(itertools.chain([head],
                                   itertools.islice(it, chunksize - 1)))
def window(iterable, size=2, cast=tuple):
    # type: (Iterable, int, Callable) -> Iterable
    """
    Yields items in bunches of a given size, but rolling only one item
    in and out at a time when iterating.

    >>> list(window([1, 2, 3]))
    [(1, 2), (2, 3)]

    By default, this will cast the window to a tuple before yielding it;
    however, any function that will accept an iterable as its argument
    is a valid target.

    If you pass None as a cast value, the deque will be returned as-is,
    which is more performant. However, since only one deque is used
    for the entire iteration, you'll get the same reference everytime,
    only the deque will contains different items. The result might not
    be what you want :

    >>> list(window([1, 2, 3], cast=None))
    [deque([2, 3], maxlen=2), deque([2, 3], maxlen=2)]
    """
    it = iter(iterable)
    buffer = deque(itertools.islice(it, size), size)
    # A falsy cast (None) means "hand back the shared deque untouched";
    # folding both cases into one transform avoids the duplicated loops.
    transform = cast if cast else (lambda window_content: window_content)
    yield transform(buffer)
    for item in it:
        buffer.append(item)
        yield transform(buffer)
def at_index(iterable, index):
    # type: (Iterable[T], int) -> T
    """Return the item at the index of this iterable or raises IndexError.

    WARNING: this will consume generators.

    Negative indices are allowed but be aware they will cause n items to
    be held in memory, where n = abs(index)
    """
    try:
        if index < 0:
            tail = deque(iterable, maxlen=abs(index))
            # The deque keeps at most abs(index) trailing items. If it
            # holds fewer, the iterable was too short and the requested
            # position does not exist: the original code would silently
            # return the first buffered item instead of raising (e.g.
            # at_index([1, 2], -5) returned 1). Mirror list semantics.
            if len(tail) < abs(index):
                raise IndexError
            return tail.popleft()
        return next(itertools.islice(iterable, index, index + 1))
    except (StopIteration, IndexError) as e:
        raise_from(IndexError('Index "%d" out of range' % index), e)
# TODO: accept a default value if not value is found
def iterslice(iterable, start=0, stop=None, step=1):
    # type: (Iterable[T], int, int, int) -> Iterable[T]
    """ Like itertools.islice, but accept int and callables.

    If `start` is a callable, start the slice after the first time
    start(item) == True.

    If `stop` is a callable, stop the slice after the first time
    stop(item) == True.
    """
    if step < 0:
        raise ValueError("The step can not be negative: '%s' given" % step)

    # A boundary counts as an index when it is an int, or (for stop only)
    # when it is falsy (None / 0), matching islice's own convention.
    start_is_index = isinstance(start, int)
    stop_is_index = isinstance(stop, int) or not stop

    if start_is_index and stop_is_index:
        # [int:int] — plain islice does everything.
        return itertools.islice(iterable, start, stop, step)
    if start_is_index:
        # [int:Callable] — slice from the index, cut on the predicate.
        return stops_when(itertools.islice(iterable, start, None, step), stop)
    if stop_is_index:
        # [Callable:int] — bound by index first, then drop the head.
        return starts_when(itertools.islice(iterable, None, stop, step), start)
    # [Callable:Callable]
    return stops_when(starts_when(iterable, start), stop)
# TODO: allow to disable auto sorting. Document how to make it behave
# like the original groupby
# TODO: allow cast to be None, which set cast to lambda x: x
@renamed_argument('key', 'keyfunc')
def groupby(iterable, keyfunc=None, reverse=False, cast=tuple):
    # type: (Iterable, Callable, bool, Callable) -> Iterable
    """Sort `iterable` by `keyfunc`, then yield (key, group) pairs.

    Unlike itertools.groupby, the input is sorted first with the same
    `keyfunc`, so items with equal keys are always grouped together
    regardless of their original order.

    Args:
        iterable: the items to group.
        keyfunc: function used both to sort and to group. None compares
            the items themselves.
        reverse: sort in descending order before grouping.
        cast: applied to each group before yielding; the default (tuple)
            materializes the group, since itertools.groupby groups are
            lazy views invalidated by advancing the iteration.
    """
    sorted_iterable = sorted(iterable, key=keyfunc, reverse=reverse)
    for key, group in itertools.groupby(sorted_iterable, keyfunc):
        yield key, cast(group)
# TODO: make the same things than in matrix, where the default value
# can be a callable, a non string iterable, or a value
def firsts(iterable, items=1, default=None):
    # type: (Iterable[T], int, T) -> Iterable[T]
    """ Lazily return the first `items` items from this iterable,
    padding with `default` when the iterable is shorter than `items`. """
    try:
        items = int(items)
    except (ValueError, TypeError):
        raise ValueError("items should be usable as an int but is currently "
                         "'{}' of type '{}'".format(items, type(items)))

    # TODO: replace this so that it returns lasts()
    if items < 0:
        raise ValueError(ww.f("items is {items} but should "
                          "be greater than 0. If you wish to get the last "
                          "items, use the lasts() function."))

    # range() first in zip so no extra item is pulled from the iterable
    # once the quota is reached.
    produced = 0
    for _, item in zip(range(items), iterable):
        yield item
        produced += 1
    # Pad with `default` up to `items`. The original computed the padding
    # as `items - (i + 1)` from the loop index, which under-padded by one
    # when the iterable was empty (i never advanced past its initial 0).
    for _ in range(items - produced):
        yield default
def lasts(iterable, items=1, default=None):
    # type: (Iterable[T], int, T) -> Iterable[T]
    """Yield the last `items` items of `iterable`, left-padded with
    `default` when the iterable holds fewer than `items` elements."""
    # maxlen bounds memory: only the trailing `items` elements are kept.
    tail = deque(iterable, maxlen=items)
    padding = items - len(tail)
    for _ in range(padding):
        yield default
    for item in tail:
        yield item
# reduce is technically the last value of accumulate
# use ww.utils.EMPTY instead of EMPTY
# Put in the doc than scan=fold=accumulare and reduce=accumulate
# replace https://docs.python.org/3/library/itertools.html#itertools.accumulate
# that works only on Python 3.3 and doesn't have echo_start
# def accumulate(func, iterable, start=ww.utils.EMPTY, *, echo_start=True):
# """
# Scan higher-order function.
# The first 3 positional arguments are alike to the ``functools.reduce``
# signature. This function accepts an extra optional ``echo_start``
# parameter that controls whether the first value should be in the output.
# """
# it = iter(iterable)
# if start is ww.utils._EMPTY:
# start = next(it)
# if echo_start:
# yield start
# for item in it:
# start = func(start, item)
# yield start
|
Tygs/ww | src/ww/tools/iterables.py | iterslice | python | def iterslice(iterable, start=0, stop=None, step=1):
# type: (Iterable[T], int, int, int) -> Iterable[T]
if step < 0:
raise ValueError("The step can not be negative: '%s' given" % step)
if not isinstance(start, int):
# [Callable:Callable]
if not isinstance(stop, int) and stop:
return stops_when(starts_when(iterable, start), stop)
# [Callable:int]
return starts_when(itertools.islice(iterable, None, stop, step), start)
# [int:Callable]
if not isinstance(stop, int) and stop:
return stops_when(itertools.islice(iterable, start, None, step), stop)
# [int:int]
return itertools.islice(iterable, start, stop, step) | Like itertools.islice, but accept int and callables.
If `start` is a callable, start the slice after the first time
start(item) == True.
If `stop` is a callable, stop the slice after the first time
stop(item) == True. | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/tools/iterables.py#L265-L293 | [
"def starts_when(iterable, condition):\n # type: (Iterable, Union[Callable, Any]) -> Iterable\n \"\"\"Start yielding items when a condition arise.\n\n Args:\n iterable: the iterable to filter.\n condition: if the callable returns True once, start yielding\n items. If it's not a callable, it will be converted\n to one as `lambda condition: condition == item`.\n\n Example:\n\n >>> list(starts_when(range(10), lambda x: x > 5))\n [6, 7, 8, 9]\n >>> list(starts_when(range(10), 7))\n [7, 8, 9]\n \"\"\"\n if not callable(condition):\n cond_value = condition\n\n def condition(x):\n return x == cond_value\n return itertools.dropwhile(lambda x: not condition(x), iterable)\n",
"def stops_when(iterable, condition):\n # type: (Iterable, Union[Callable, Any]) -> Iterable\n \"\"\"Stop yielding items when a condition arise.\n\n Args:\n iterable: the iterable to filter.\n condition: if the callable returns True once, stop yielding\n items. If it's not a callable, it will be converted\n to one as `lambda condition: condition == item`.\n\n Example:\n\n >>> list(stops_when(range(10), lambda x: x > 5))\n [0, 1, 2, 3, 4, 5]\n >>> list(stops_when(range(10), 7))\n [0, 1, 2, 3, 4, 5, 6]\n \"\"\"\n if not callable(condition):\n cond_value = condition\n\n def condition(x):\n return x == cond_value\n return itertools.takewhile(lambda x: not condition(x), iterable)\n"
] | # coding: utf-8
"""
:doc:`g() </iterable_wrapper>` is very convenient, but it's only a
thin wrapper on top of the tools from this module.
So if you want to apply some of the goodies from it without having to
turn your iterables into IterableWrapper objects, you can use the functions
from this module directly.
Example:
>>> from ww.tools.iterables import chunks # same as g().chunks()
>>> list(chunks(range(10), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]
You'll find bellow the detailed documentation for each functions. Remember
they all take an iterable as input, and most often ouput a generator.
Go have a look, there is some great stuff here!
"""
from __future__ import division, absolute_import, print_function
import itertools
from future.utils import raise_from
import ww
from ww.types import Union, Callable, Iterable, Any, T # noqa
from ww.utils import renamed_argument
from collections import deque
# TODO: implement all https://docs.python.org/3/library/itertools.html
# which means backports and receipes
# TODO: cycle, but accept a max repeat
# TODO: filter() but:
# if an iterable is first element, lambda x: x in first_element
# if an iterable is a non callable scalare,
# lambda x: x == first_element
# a 3rd param to take an Exception or a list of exception to ignore so you can
# filter out stuff raisin exceptions
# TODO: map, but a 3rd param to take an Exception or a list of exception
# to ignore so you can filter out stuff raisin exceptions
def starts_when(iterable, condition):
# type: (Iterable, Union[Callable, Any]) -> Iterable
"""Start yielding items when a condition arise.
Args:
iterable: the iterable to filter.
condition: if the callable returns True once, start yielding
items. If it's not a callable, it will be converted
to one as `lambda condition: condition == item`.
Example:
>>> list(starts_when(range(10), lambda x: x > 5))
[6, 7, 8, 9]
>>> list(starts_when(range(10), 7))
[7, 8, 9]
"""
if not callable(condition):
cond_value = condition
def condition(x):
return x == cond_value
return itertools.dropwhile(lambda x: not condition(x), iterable)
def stops_when(iterable, condition):
# type: (Iterable, Union[Callable, Any]) -> Iterable
"""Stop yielding items when a condition arise.
Args:
iterable: the iterable to filter.
condition: if the callable returns True once, stop yielding
items. If it's not a callable, it will be converted
to one as `lambda condition: condition == item`.
Example:
>>> list(stops_when(range(10), lambda x: x > 5))
[0, 1, 2, 3, 4, 5]
>>> list(stops_when(range(10), 7))
[0, 1, 2, 3, 4, 5, 6]
"""
if not callable(condition):
cond_value = condition
def condition(x):
return x == cond_value
return itertools.takewhile(lambda x: not condition(x), iterable)
def skip_duplicates(iterable, key=None, fingerprints=()):
# type: (Iterable, Callable, Any) -> Iterable
"""
Returns a generator that will yield all objects from iterable, skipping
duplicates.
Duplicates are identified using the `key` function to calculate a
unique fingerprint. This does not use natural equality, but the
result use a set() to remove duplicates, so defining __eq__
on your objects would have no effect.
By default the fingerprint is the object itself,
which ensure the functions works as-is with an iterable of primitives
such as int, str or tuple.
:Example:
>>> list(skip_duplicates([1, 2, 3, 4, 4, 2, 1, 3 , 4]))
[1, 2, 3, 4]
The return value of `key` MUST be hashable, which means for
non hashable objects such as dict, set or list, you need to specify
a function that returns a hashable fingerprint.
:Example:
>>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
... lambda x: tuple(x)))
[[], [1, 2]]
>>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
... lambda x: (type(x), tuple(x))))
[[], (), [1, 2], (1, 2)]
For more complex types, such as custom classes, the default behavior
is to remove nothing. You MUST provide a `key` function is you wish
to filter those.
:Example:
>>> class Test(object):
... def __init__(self, foo='bar'):
... self.foo = foo
... def __repr__(self):
... return "Test('%s')" % self.foo
...
>>> list(skip_duplicates([Test(), Test(), Test('other')]))
[Test('bar'), Test('bar'), Test('other')]
>>> list(skip_duplicates([Test(), Test(), Test('other')],\
lambda x: x.foo))
[Test('bar'), Test('other')]
"""
fingerprints = fingerprints or set()
fingerprint = None # needed on type errors unrelated to hashing
try:
# duplicate some code to gain perf in the most common case
if key is None:
for x in iterable:
if x not in fingerprints:
yield x
fingerprints.add(x)
else:
for x in iterable:
fingerprint = key(x)
if fingerprint not in fingerprints:
yield x
fingerprints.add(fingerprint)
except TypeError:
try:
hash(fingerprint)
except TypeError:
raise TypeError(
"The 'key' function returned a non hashable object of type "
"'%s' when receiving '%s'. Make sure this function always "
"returns a hashable object. Hint: immutable primitives like"
"int, str or tuple, are hashable while dict, set and list are "
"not." % (type(fingerprint), x))
else:
raise
# TODO: test that on big iterators to check for recursion limit
def chunks(iterable, chunksize, cast=tuple):
# type: (Iterable, int, Callable) -> Iterable
"""
Yields items from an iterator in iterable chunks.
"""
it = iter(iterable)
while True:
yield cast(itertools.chain([next(it)],
itertools.islice(it, chunksize - 1)))
def window(iterable, size=2, cast=tuple):
# type: (Iterable, int, Callable) -> Iterable
"""
Yields items in bunches of a given size, but rolling only one item
in and out at a time when iterating.
>>> list(window([1, 2, 3]))
[(1, 2), (2, 3)]
By default, this will cast the window to a tuple before yielding it;
however, any function that will accept an iterable as its argument
is a valid target.
If you pass None as a cast value, the deque will be returned as-is,
which is more performant. However, since only one deque is used
for the entire iteration, you'll get the same reference everytime,
only the deque will contains different items. The result might not
be what you want :
>>> list(window([1, 2, 3], cast=None))
[deque([2, 3], maxlen=2), deque([2, 3], maxlen=2)]
"""
iterable = iter(iterable)
d = deque(itertools.islice(iterable, size), size)
if cast:
yield cast(d)
for x in iterable:
d.append(x)
yield cast(d)
else:
yield d
for x in iterable:
d.append(x)
yield d
def at_index(iterable, index):
# type: (Iterable[T], int) -> T
"""" Return the item at the index of this iterable or raises IndexError.
WARNING: this will consume generators.
Negative indices are allowed but be aware they will cause n items to
be held in memory, where n = abs(index)
"""
try:
if index < 0:
return deque(iterable, maxlen=abs(index)).popleft()
return next(itertools.islice(iterable, index, index + 1))
except (StopIteration, IndexError) as e:
raise_from(IndexError('Index "%d" out of range' % index), e)
# TODO: accept a default value if not value is found
def first_true(iterable, func):
    # type: (Iterable[T], Callable) -> T
    """Return the first item of the iterable for which func(item) == True.

    Raises IndexError when no item matches.

    WARNING: this will consume generators.
    """
    matches = (item for item in iterable if func(item))
    try:
        return next(matches)
    except StopIteration as err:
        # TODO: Find a better error message
        raise_from(IndexError('No match for %s' % func), err)
# TODO: allow to disable auto sorting. Document how to make it behave
# like the original groupby
# TODO: allow cast to be None, which set cast to lambda x: x
@renamed_argument('key', 'keyfunc')
def groupby(iterable, keyfunc=None, reverse=False, cast=tuple):
    # type: (Iterable, Callable, bool, Callable) -> Iterable
    """Sort `iterable` with `keyfunc`, then yield (key, cast(group)) pairs.

    Unlike itertools.groupby, the input is sorted first, so each distinct
    key shows up exactly once.
    """
    ordered = sorted(iterable, key=keyfunc, reverse=reverse)
    for group_key, group in itertools.groupby(ordered, keyfunc):
        yield group_key, cast(group)
# TODO: make the same things than in matrix, where the default value
# can be a callable, a non string iterable, or a value
def firsts(iterable, items=1, default=None):
    # type: (Iterable[T], int, T) -> Iterable[T]
    """Lazily yield the first `items` items of `iterable`, padding with
    `default` when the iterable is too short.

    Args:
        iterable: source of items.
        items: how many items to yield; must be usable as a non-negative int.
        default: padding value used when `iterable` has fewer than
            `items` elements.

    Raises:
        ValueError: if `items` cannot be converted to int or is negative.
    """
    try:
        items = int(items)
    except (ValueError, TypeError):
        raise ValueError("items should be usable as an int but is currently "
                         "'{}' of type '{}'".format(items, type(items)))
    # TODO: replace this so that it returns lasts()
    if items < 0:
        raise ValueError(ww.f("items is {items} but should "
                              "be greater than 0. If you wish to get the last "
                              "items, use the lasts() function."))
    # Count how many real items were produced so the padding is exact.
    # The previous `items - (i + 1)` arithmetic under-padded by one
    # default when the iterable was completely empty (i stayed at 0).
    yielded = 0
    for yielded, item in enumerate(itertools.islice(iterable, items), 1):
        yield item
    for _ in range(items - yielded):
        yield default
def lasts(iterable, items=1, default=None):
    # type: (Iterable[T], int, T) -> Iterable[T]
    """Lazily yield the last `items` items of `iterable`, left-padding
    with `default` when the iterable is too short.
    """
    tail = deque(iterable, maxlen=items)
    padding = items - len(tail)
    while padding > 0:
        yield default
        padding -= 1
    for item in tail:
        yield item
# reduce is technically the last value of accumulate
# use ww.utils.EMPTY instead of EMPTY
# Put in the doc than scan=fold=accumulare and reduce=accumulate
# replace https://docs.python.org/3/library/itertools.html#itertools.accumulate
# that works only on Python 3.3 and doesn't have echo_start
# def accumulate(func, iterable, start=ww.utils.EMPTY, *, echo_start=True):
# """
# Scan higher-order function.
# The first 3 positional arguments are alike to the ``functools.reduce``
# signature. This function accepts an extra optional ``echo_start``
# parameter that controls whether the first value should be in the output.
# """
# it = iter(iterable)
# if start is ww.utils._EMPTY:
# start = next(it)
# if echo_start:
# yield start
# for item in it:
# start = func(start, item)
# yield start
|
Tygs/ww | src/ww/tools/iterables.py | firsts | python | def firsts(iterable, items=1, default=None):
# type: (Iterable[T], int, T) -> Iterable[T]
try:
items = int(items)
except (ValueError, TypeError):
raise ValueError("items should be usable as an int but is currently "
"'{}' of type '{}'".format(items, type(items)))
# TODO: replace this so that it returns lasts()
if items < 0:
raise ValueError(ww.f("items is {items} but should "
"be greater than 0. If you wish to get the last "
"items, use the lasts() function."))
i = 0
for i, item in zip(range(items), iterable):
yield item
for x in range(items - (i + 1)):
yield default | Lazily return the first x items from this iterable or default. | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/tools/iterables.py#L309-L330 | null | # coding: utf-8
"""
:doc:`g() </iterable_wrapper>` is very convenient, but it's only a
thin wrapper on top of the tools from this module.
So if you want to apply some of the goodies from it without having to
turn your iterables into IterableWrapper objects, you can use the functions
from this module directly.
Example:
>>> from ww.tools.iterables import chunks # same as g().chunks()
>>> list(chunks(range(10), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]
You'll find bellow the detailed documentation for each functions. Remember
they all take an iterable as input, and most often ouput a generator.
Go have a look, there is some great stuff here!
"""
from __future__ import division, absolute_import, print_function
import itertools
from future.utils import raise_from
import ww
from ww.types import Union, Callable, Iterable, Any, T # noqa
from ww.utils import renamed_argument
from collections import deque
# TODO: implement all https://docs.python.org/3/library/itertools.html
# which means backports and receipes
# TODO: cycle, but accept a max repeat
# TODO: filter() but:
# if an iterable is first element, lambda x: x in first_element
# if an iterable is a non callable scalare,
# lambda x: x == first_element
# a 3rd param to take an Exception or a list of exception to ignore so you can
# filter out stuff raisin exceptions
# TODO: map, but a 3rd param to take an Exception or a list of exception
# to ignore so you can filter out stuff raisin exceptions
def starts_when(iterable, condition):
    # type: (Iterable, Union[Callable, Any]) -> Iterable
    """Skip items until `condition` is met, then yield everything after.

    If `condition` is not callable, it is treated as a sentinel value
    and matched with equality.

    Example:

        >>> list(starts_when(range(10), lambda x: x > 5))
        [6, 7, 8, 9]
        >>> list(starts_when(range(10), 7))
        [7, 8, 9]
    """
    if callable(condition):
        predicate = condition
    else:
        sentinel = condition

        def predicate(item):
            return item == sentinel
    return itertools.dropwhile(lambda item: not predicate(item), iterable)
def stops_when(iterable, condition):
    # type: (Iterable, Union[Callable, Any]) -> Iterable
    """Yield items until `condition` is met, then stop.

    If `condition` is not callable, it is treated as a sentinel value
    and matched with equality.

    Example:

        >>> list(stops_when(range(10), lambda x: x > 5))
        [0, 1, 2, 3, 4, 5]
        >>> list(stops_when(range(10), 7))
        [0, 1, 2, 3, 4, 5, 6]
    """
    if callable(condition):
        predicate = condition
    else:
        sentinel = condition

        def predicate(item):
            return item == sentinel
    return itertools.takewhile(lambda item: not predicate(item), iterable)
def skip_duplicates(iterable, key=None, fingerprints=()):
    # type: (Iterable, Callable, Any) -> Iterable
    """Yield items from `iterable`, skipping any already seen.

    `key` computes a hashable fingerprint for each item; by default the
    item itself is the fingerprint, which works for primitives such as
    int, str or tuple. `fingerprints` lets callers provide (and share)
    the set of fingerprints seen so far.

    Example:

        >>> list(skip_duplicates([1, 2, 3, 4, 4, 2, 1, 3 , 4]))
        [1, 2, 3, 4]
        >>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
        ...                      lambda x: tuple(x)))
        [[], [1, 2]]
    """
    seen = fingerprints or set()
    fingerprint = None  # inspected by the error handler below
    try:
        if key is None:
            # Fast path: the items themselves are the fingerprints.
            for item in iterable:
                if item in seen:
                    continue
                yield item
                seen.add(item)
        else:
            for item in iterable:
                fingerprint = key(item)
                if fingerprint in seen:
                    continue
                yield item
                seen.add(fingerprint)
    except TypeError:
        # Distinguish "key returned something unhashable" from an
        # unrelated TypeError, which must propagate untouched.
        try:
            hash(fingerprint)
        except TypeError:
            raise TypeError(
                "The 'key' function returned a non hashable object of type "
                "'%s' when receiving '%s'. Make sure this function always "
                "returns a hashable object. Hint: immutable primitives like"
                "int, str or tuple, are hashable while dict, set and list are "
                "not." % (type(fingerprint), item))
        else:
            raise
# TODO: test that on big iterators to check for recursion limit
def chunks(iterable, chunksize, cast=tuple):
    # type: (Iterable, int, Callable) -> Iterable
    """Yield successive chunks of `chunksize` items from `iterable`.

    The final chunk may be shorter. `cast` is applied to each chunk
    (default: tuple).
    """
    it = iter(iterable)
    while True:
        try:
            first = next(it)
        except StopIteration:
            # PEP 479: letting StopIteration escape a generator raises
            # RuntimeError on Python 3.7+, so end the generator explicitly.
            return
        yield cast(itertools.chain([first],
                                   itertools.islice(it, chunksize - 1)))
def window(iterable, size=2, cast=tuple):
    # type: (Iterable, int, Callable) -> Iterable
    """Yield a sliding window of `size` items, rolling one item at a time.

    `cast` is applied to every window before yielding (default: tuple).
    Passing cast=None hands out the underlying deque itself — faster,
    but it is the SAME deque object every time, mutated in place, which
    may not be what you want.

    Example:

        >>> list(window([1, 2, 3]))
        [(1, 2), (2, 3)]
    """
    items = iter(iterable)
    win = deque(itertools.islice(items, size), size)
    if not cast:
        # Zero-copy variant: always yield the one rolling deque.
        yield win
        for item in items:
            win.append(item)
            yield win
    else:
        yield cast(win)
        for item in items:
            win.append(item)
            yield cast(win)
def at_index(iterable, index):
    # type: (Iterable[T], int) -> T
    """Return the item at `index` in `iterable` or raise IndexError.

    WARNING: this consumes generators. Negative indices are allowed, but
    they buffer abs(index) items in memory.
    """
    try:
        if index >= 0:
            # Drop `index` items, then take the very next one.
            return next(itertools.islice(iterable, index, index + 1))
        # Keep only the trailing abs(index) items; the oldest is the answer.
        return deque(iterable, maxlen=abs(index)).popleft()
    except (StopIteration, IndexError) as e:
        raise_from(IndexError('Index "%d" out of range' % index), e)
# TODO: accept a default value if not value is found
def first_true(iterable, func):
    # type: (Iterable[T], Callable) -> T
    """Return the first item of `iterable` for which func(item) is true.

    Raises IndexError when no item matches. WARNING: consumes generators.
    """
    matching = (item for item in iterable if func(item))
    try:
        return next(matching)
    except StopIteration as e:
        # TODO: Find a better error message
        raise_from(IndexError('No match for %s' % func), e)
def iterslice(iterable, start=0, stop=None, step=1):
    # type: (Iterable[T], int, int, int) -> Iterable[T]
    """Like itertools.islice, but `start` and `stop` may be callables.

    A callable `start` begins the slice at the first item for which
    start(item) is true; a callable `stop` ends it at the first item for
    which stop(item) is true.
    """
    if step < 0:
        raise ValueError("The step can not be negative: '%s' given" % step)

    start_is_int = isinstance(start, int)
    # A falsy stop (None) counts as the plain-int case.
    stop_is_int = isinstance(stop, int) or not stop

    if start_is_int and stop_is_int:
        # [int:int]
        return itertools.islice(iterable, start, stop, step)
    if start_is_int:
        # [int:Callable]
        return stops_when(itertools.islice(iterable, start, None, step), stop)
    if stop_is_int:
        # [Callable:int]
        return starts_when(itertools.islice(iterable, None, stop, step), start)
    # [Callable:Callable]
    return stops_when(starts_when(iterable, start), stop)
# TODO: allow to disable auto sorting. Document how to make it behave
# like the original groupby
# TODO: allow cast to be None, which set cast to lambda x: x
@renamed_argument('key', 'keyfunc')
def groupby(iterable, keyfunc=None, reverse=False, cast=tuple):
    # type: (Iterable, Callable, bool, Callable) -> Iterable
    """Sort `iterable` with `keyfunc`, then yield (key, cast(group)) pairs.

    Unlike itertools.groupby, the input is sorted first, so each distinct
    key shows up exactly once.
    """
    ordered = sorted(iterable, key=keyfunc, reverse=reverse)
    for group_key, group in itertools.groupby(ordered, keyfunc):
        yield group_key, cast(group)
# TODO: make the same things than in matrix, where the default value
# can be a callable, a non string iterable, or a value
def lasts(iterable, items=1, default=None):
    # type: (Iterable[T], int, T) -> Iterable[T]
    """Lazily yield the last `items` items of `iterable`, left-padding
    with `default` when the iterable is too short.
    """
    tail = deque(iterable, maxlen=items)
    padding = items - len(tail)
    while padding > 0:
        yield default
        padding -= 1
    for item in tail:
        yield item
# reduce is technically the last value of accumulate
# use ww.utils.EMPTY instead of EMPTY
# Put in the doc than scan=fold=accumulare and reduce=accumulate
# replace https://docs.python.org/3/library/itertools.html#itertools.accumulate
# that works only on Python 3.3 and doesn't have echo_start
# def accumulate(func, iterable, start=ww.utils.EMPTY, *, echo_start=True):
# """
# Scan higher-order function.
# The first 3 positional arguments are alike to the ``functools.reduce``
# signature. This function accepts an extra optional ``echo_start``
# parameter that controls whether the first value should be in the output.
# """
# it = iter(iterable)
# if start is ww.utils._EMPTY:
# start = next(it)
# if echo_start:
# yield start
# for item in it:
# start = func(start, item)
# yield start
|
Tygs/ww | src/ww/tools/iterables.py | lasts | python | def lasts(iterable, items=1, default=None):
# type: (Iterable[T], int, T) -> Iterable[T]
last_items = deque(iterable, maxlen=items)
for _ in range(items - len(last_items)):
yield default
for y in last_items:
yield y | Lazily return the last x items from this iterable or default. | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/tools/iterables.py#L333-L343 | null | # coding: utf-8
"""
:doc:`g() </iterable_wrapper>` is very convenient, but it's only a
thin wrapper on top of the tools from this module.
So if you want to apply some of the goodies from it without having to
turn your iterables into IterableWrapper objects, you can use the functions
from this module directly.
Example:
>>> from ww.tools.iterables import chunks # same as g().chunks()
>>> list(chunks(range(10), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]
You'll find bellow the detailed documentation for each functions. Remember
they all take an iterable as input, and most often ouput a generator.
Go have a look, there is some great stuff here!
"""
from __future__ import division, absolute_import, print_function
import itertools
from future.utils import raise_from
import ww
from ww.types import Union, Callable, Iterable, Any, T # noqa
from ww.utils import renamed_argument
from collections import deque
# TODO: implement all https://docs.python.org/3/library/itertools.html
# which means backports and receipes
# TODO: cycle, but accept a max repeat
# TODO: filter() but:
# if an iterable is first element, lambda x: x in first_element
# if an iterable is a non callable scalare,
# lambda x: x == first_element
# a 3rd param to take an Exception or a list of exception to ignore so you can
# filter out stuff raisin exceptions
# TODO: map, but a 3rd param to take an Exception or a list of exception
# to ignore so you can filter out stuff raisin exceptions
def starts_when(iterable, condition):
    # type: (Iterable, Union[Callable, Any]) -> Iterable
    """Skip items until `condition` is met, then yield everything after.

    If `condition` is not callable, it is treated as a sentinel value
    and matched with equality.

    Example:

        >>> list(starts_when(range(10), lambda x: x > 5))
        [6, 7, 8, 9]
        >>> list(starts_when(range(10), 7))
        [7, 8, 9]
    """
    if callable(condition):
        predicate = condition
    else:
        sentinel = condition

        def predicate(item):
            return item == sentinel
    return itertools.dropwhile(lambda item: not predicate(item), iterable)
def stops_when(iterable, condition):
    # type: (Iterable, Union[Callable, Any]) -> Iterable
    """Yield items until `condition` is met, then stop.

    If `condition` is not callable, it is treated as a sentinel value
    and matched with equality.

    Example:

        >>> list(stops_when(range(10), lambda x: x > 5))
        [0, 1, 2, 3, 4, 5]
        >>> list(stops_when(range(10), 7))
        [0, 1, 2, 3, 4, 5, 6]
    """
    if callable(condition):
        predicate = condition
    else:
        sentinel = condition

        def predicate(item):
            return item == sentinel
    return itertools.takewhile(lambda item: not predicate(item), iterable)
def skip_duplicates(iterable, key=None, fingerprints=()):
    # type: (Iterable, Callable, Any) -> Iterable
    """Yield items from `iterable`, skipping any already seen.

    `key` computes a hashable fingerprint for each item; by default the
    item itself is the fingerprint, which works for primitives such as
    int, str or tuple. `fingerprints` lets callers provide (and share)
    the set of fingerprints seen so far.

    Example:

        >>> list(skip_duplicates([1, 2, 3, 4, 4, 2, 1, 3 , 4]))
        [1, 2, 3, 4]
        >>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
        ...                      lambda x: tuple(x)))
        [[], [1, 2]]
    """
    seen = fingerprints or set()
    fingerprint = None  # inspected by the error handler below
    try:
        if key is None:
            # Fast path: the items themselves are the fingerprints.
            for item in iterable:
                if item in seen:
                    continue
                yield item
                seen.add(item)
        else:
            for item in iterable:
                fingerprint = key(item)
                if fingerprint in seen:
                    continue
                yield item
                seen.add(fingerprint)
    except TypeError:
        # Distinguish "key returned something unhashable" from an
        # unrelated TypeError, which must propagate untouched.
        try:
            hash(fingerprint)
        except TypeError:
            raise TypeError(
                "The 'key' function returned a non hashable object of type "
                "'%s' when receiving '%s'. Make sure this function always "
                "returns a hashable object. Hint: immutable primitives like"
                "int, str or tuple, are hashable while dict, set and list are "
                "not." % (type(fingerprint), item))
        else:
            raise
# TODO: test that on big iterators to check for recursion limit
def chunks(iterable, chunksize, cast=tuple):
    # type: (Iterable, int, Callable) -> Iterable
    """Yield successive chunks of `chunksize` items from `iterable`.

    The final chunk may be shorter. `cast` is applied to each chunk
    (default: tuple).
    """
    it = iter(iterable)
    while True:
        try:
            first = next(it)
        except StopIteration:
            # PEP 479: letting StopIteration escape a generator raises
            # RuntimeError on Python 3.7+, so end the generator explicitly.
            return
        yield cast(itertools.chain([first],
                                   itertools.islice(it, chunksize - 1)))
def window(iterable, size=2, cast=tuple):
    # type: (Iterable, int, Callable) -> Iterable
    """Yield a sliding window of `size` items, rolling one item at a time.

    `cast` is applied to every window before yielding (default: tuple).
    Passing cast=None hands out the underlying deque itself — faster,
    but it is the SAME deque object every time, mutated in place, which
    may not be what you want.

    Example:

        >>> list(window([1, 2, 3]))
        [(1, 2), (2, 3)]
    """
    items = iter(iterable)
    win = deque(itertools.islice(items, size), size)
    if not cast:
        # Zero-copy variant: always yield the one rolling deque.
        yield win
        for item in items:
            win.append(item)
            yield win
    else:
        yield cast(win)
        for item in items:
            win.append(item)
            yield cast(win)
def at_index(iterable, index):
    # type: (Iterable[T], int) -> T
    """Return the item at `index` in `iterable` or raise IndexError.

    WARNING: this consumes generators. Negative indices are allowed, but
    they buffer abs(index) items in memory.
    """
    try:
        if index >= 0:
            # Drop `index` items, then take the very next one.
            return next(itertools.islice(iterable, index, index + 1))
        # Keep only the trailing abs(index) items; the oldest is the answer.
        return deque(iterable, maxlen=abs(index)).popleft()
    except (StopIteration, IndexError) as e:
        raise_from(IndexError('Index "%d" out of range' % index), e)
# TODO: accept a default value if not value is found
def first_true(iterable, func):
    # type: (Iterable[T], Callable) -> T
    """Return the first item of `iterable` for which func(item) is true.

    Raises IndexError when no item matches. WARNING: consumes generators.
    """
    matching = (item for item in iterable if func(item))
    try:
        return next(matching)
    except StopIteration as e:
        # TODO: Find a better error message
        raise_from(IndexError('No match for %s' % func), e)
def iterslice(iterable, start=0, stop=None, step=1):
    # type: (Iterable[T], int, int, int) -> Iterable[T]
    """Like itertools.islice, but `start` and `stop` may be callables.

    A callable `start` begins the slice at the first item for which
    start(item) is true; a callable `stop` ends it at the first item for
    which stop(item) is true.
    """
    if step < 0:
        raise ValueError("The step can not be negative: '%s' given" % step)

    start_is_int = isinstance(start, int)
    # A falsy stop (None) counts as the plain-int case.
    stop_is_int = isinstance(stop, int) or not stop

    if start_is_int and stop_is_int:
        # [int:int]
        return itertools.islice(iterable, start, stop, step)
    if start_is_int:
        # [int:Callable]
        return stops_when(itertools.islice(iterable, start, None, step), stop)
    if stop_is_int:
        # [Callable:int]
        return starts_when(itertools.islice(iterable, None, stop, step), start)
    # [Callable:Callable]
    return stops_when(starts_when(iterable, start), stop)
# TODO: allow to disable auto sorting. Document how to make it behave
# like the original groupby
# TODO: allow cast to be None, which set cast to lambda x: x
@renamed_argument('key', 'keyfunc')
def groupby(iterable, keyfunc=None, reverse=False, cast=tuple):
    # type: (Iterable, Callable, bool, Callable) -> Iterable
    """Sort `iterable` with `keyfunc`, then yield (key, cast(group)) pairs.

    Unlike itertools.groupby, the input is sorted first, so each distinct
    key shows up exactly once.
    """
    ordered = sorted(iterable, key=keyfunc, reverse=reverse)
    for group_key, group in itertools.groupby(ordered, keyfunc):
        yield group_key, cast(group)
# TODO: make the same things than in matrix, where the default value
# can be a callable, a non string iterable, or a value
def firsts(iterable, items=1, default=None):
    # type: (Iterable[T], int, T) -> Iterable[T]
    """Lazily yield the first `items` items of `iterable`, padding with
    `default` when the iterable is too short.

    Args:
        iterable: source of items.
        items: how many items to yield; must be usable as a non-negative int.
        default: padding value used when `iterable` has fewer than
            `items` elements.

    Raises:
        ValueError: if `items` cannot be converted to int or is negative.
    """
    try:
        items = int(items)
    except (ValueError, TypeError):
        raise ValueError("items should be usable as an int but is currently "
                         "'{}' of type '{}'".format(items, type(items)))
    # TODO: replace this so that it returns lasts()
    if items < 0:
        raise ValueError(ww.f("items is {items} but should "
                              "be greater than 0. If you wish to get the last "
                              "items, use the lasts() function."))
    # Count how many real items were produced so the padding is exact.
    # The previous `items - (i + 1)` arithmetic under-padded by one
    # default when the iterable was completely empty (i stayed at 0).
    yielded = 0
    for yielded, item in enumerate(itertools.islice(iterable, items), 1):
        yield item
    for _ in range(items - yielded):
        yield default
# reduce is technically the last value of accumulate
# use ww.utils.EMPTY instead of EMPTY
# Put in the doc than scan=fold=accumulare and reduce=accumulate
# replace https://docs.python.org/3/library/itertools.html#itertools.accumulate
# that works only on Python 3.3 and doesn't have echo_start
# def accumulate(func, iterable, start=ww.utils.EMPTY, *, echo_start=True):
# """
# Scan higher-order function.
# The first 3 positional arguments are alike to the ``functools.reduce``
# signature. This function accepts an extra optional ``echo_start``
# parameter that controls whether the first value should be in the output.
# """
# it = iter(iterable)
# if start is ww.utils._EMPTY:
# start = next(it)
# if echo_start:
# yield start
# for item in it:
# start = func(start, item)
# yield start
|
Tygs/ww | src/ww/wrappers/dicts.py | DictWrapper.isubset | python | def isubset(self, *keys):
# type: (*Hashable) -> ww.g
return ww.g((key, self[key]) for key in keys) | Return key, self[key] as generator for key in keys.
Raise KeyError if a key does not exist
Args:
keys: Iterable containing keys
Example:
>>> from ww import d
>>> list(d({1: 1, 2: 2, 3: 3}).isubset(1, 3))
[(1, 1), (3, 3)] | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/wrappers/dicts.py#L18-L33 | null | class DictWrapper(dict):
def subset(self, *keys):
    # type: (*Hashable) -> DictWrapper
    """Return a new wrapper containing only the requested keys.

    Raises KeyError if any key is missing.

    Example:

        >>> from ww import d
        >>> d({1: 1, 2: 2, 3: 3}).subset(1,3)
        {1: 1, 3: 3}
    """
    pairs = self.isubset(*keys)
    return self.__class__(pairs)
def swap(self):
    # type: () -> DictWrapper
    """Return a new dict mapping each value back to its key.

    /!\ Be careful: duplicate values collide, and only one of their
    keys survives. /!\

    Example:

        >>> from ww import d
        >>> d({1: 2, 2: 2, 3: 3}).swap()
        {2: 2, 3: 3}
    """
    inverted = {value: key for key, value in self.items()}
    return self.__class__(inverted)
def add(self, key, value):
    # type: (Hashable, Any) -> DictWrapper
    """Associate `value` with `key`, then return self for chaining.

    Example:

        >>> from ww import d
        >>> d({1: 1}).add(4, 5)
        {1: 1, 4: 5}
    """
    self.update({key: value})
    return self
def __iter__(self):
    """Iterate over this dict's (key, value) pairs via a ww.g wrapper.

    Returns: iterator over (key, value) tuples.
    """
    pairs = ww.g(self.items())
    return iter(pairs)
@classmethod
def fromkeys(cls, iterable, value=None):
    # TODO : type: (Iterable, Union[Any, Callable]) -> DictWrapper
    # https://github.com/python/mypy/issues/2254
    """Build a new instance from `iterable` keys.

    `value` is attached to every key; if it is callable, each key gets
    value(key) instead.

    Example:

        >>> from ww import d
        >>> sorted(d.fromkeys('123', value=4).items())
        [('1', 4), ('2', 4), ('3', 4)]
        >>> sorted(d.fromkeys(range(3), value=lambda e:e**2).items())
        [(0, 0), (1, 1), (2, 4)]
    """
    if callable(value):
        return cls((key, value(key)) for key in iterable)
    return cls(dict.fromkeys(iterable, value))
def merge(self, other_dict):
    # type: (dict) -> DictWrapper
    """Update self with `other_dict` and return self for chaining.

    Example:

        >>> from ww import d
        >>> d({1: 1, 3: 3}).merge({3: 4, 4: 5})
        {1: 1, 3: 4, 4: 5}
    """
    self.update(other_dict)
    return self
def delete(self, *keys):
    # type: (*Hashable) -> DictWrapper
    """Remove every key in `keys` (missing keys are ignored); return self.

    Example:

        >>> from ww import d
        >>> d({1: 1, 2: 2, 3: 3}).delete(1, 2)
        {3: 3}
    """
    for doomed in keys:
        self.pop(doomed, None)
    return self
def __add__(self, other):
    # type: (dict) -> DictWrapper
    """Return a new dict made of self updated with `other`.

    Neither operand is modified; `other`'s values win on key clashes.

    Example:

        >>> from ww import d
        >>> d({1: 1, 3: 3}) + {3: 4, 4: 5}
        {1: 1, 3: 4, 4: 5}
    """
    merged = self.__class__(self.copy())
    merged.merge(other)
    return merged
def __radd__(self, other):
    # type: (dict) -> DictWrapper
    """Support `plain_dict + wrapped_dict`.

    Returns a copy of `other` updated with self; self's values win on
    key clashes. Neither operand is modified.
    """
    merged = self.__class__(other.copy())
    merged.merge(self)
    return merged
def __iadd__(self, other):
    # type: (dict) -> DictWrapper
    """In-place `+=`: merge `other` into self and return self.

    Example:

        >>> from ww import d
        >>> current = d({1: 1})
        >>> current += {5: 6}
        >>> current
        {1: 1, 5: 6}
    """
    self.merge(other)
    return self
|
Tygs/ww | src/ww/wrappers/dicts.py | DictWrapper.swap | python | def swap(self):
# type: () -> DictWrapper
return self.__class__((v, k) for k, v in self.items()) | Swap key and value
/!\ Be carreful, if there are duplicate values, only one will
survive /!\
Example:
>>> from ww import d
>>> d({1: 2, 2: 2, 3: 3}).swap()
{2: 2, 3: 3} | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/wrappers/dicts.py#L52-L65 | null | class DictWrapper(dict):
def isubset(self, *keys):
    # type: (*Hashable) -> ww.g
    """Lazily yield (key, self[key]) for each requested key.

    KeyError is raised (at iteration time) for missing keys.

    Example:

        >>> from ww import d
        >>> list(d({1: 1, 2: 2, 3: 3}).isubset(1, 3))
        [(1, 1), (3, 3)]
    """
    pairs = ((key, self[key]) for key in keys)
    return ww.g(pairs)
def subset(self, *keys):
    # type: (*Hashable) -> DictWrapper
    """Return a new wrapper containing only the requested keys.

    Raises KeyError if any key is missing.

    Example:

        >>> from ww import d
        >>> d({1: 1, 2: 2, 3: 3}).subset(1,3)
        {1: 1, 3: 3}
    """
    pairs = self.isubset(*keys)
    return self.__class__(pairs)
def add(self, key, value):
    # type: (Hashable, Any) -> DictWrapper
    """Associate `value` with `key`, then return self for chaining.

    Example:

        >>> from ww import d
        >>> d({1: 1}).add(4, 5)
        {1: 1, 4: 5}
    """
    self.update({key: value})
    return self
def __iter__(self):
    """Iterate over this dict's (key, value) pairs via a ww.g wrapper.

    Returns: iterator over (key, value) tuples.
    """
    pairs = ww.g(self.items())
    return iter(pairs)
@classmethod
def fromkeys(cls, iterable, value=None):
    # TODO : type: (Iterable, Union[Any, Callable]) -> DictWrapper
    # https://github.com/python/mypy/issues/2254
    """Build a new instance from `iterable` keys.

    `value` is attached to every key; if it is callable, each key gets
    value(key) instead.

    Example:

        >>> from ww import d
        >>> sorted(d.fromkeys('123', value=4).items())
        [('1', 4), ('2', 4), ('3', 4)]
        >>> sorted(d.fromkeys(range(3), value=lambda e:e**2).items())
        [(0, 0), (1, 1), (2, 4)]
    """
    if callable(value):
        return cls((key, value(key)) for key in iterable)
    return cls(dict.fromkeys(iterable, value))
def merge(self, other_dict):
    # type: (dict) -> DictWrapper
    """Update self with `other_dict` and return self for chaining.

    Example:

        >>> from ww import d
        >>> d({1: 1, 3: 3}).merge({3: 4, 4: 5})
        {1: 1, 3: 4, 4: 5}
    """
    self.update(other_dict)
    return self
def delete(self, *keys):
    # type: (*Hashable) -> DictWrapper
    """Remove every key in `keys` (missing keys are ignored); return self.

    Example:

        >>> from ww import d
        >>> d({1: 1, 2: 2, 3: 3}).delete(1, 2)
        {3: 3}
    """
    for doomed in keys:
        self.pop(doomed, None)
    return self
def __add__(self, other):
    # type: (dict) -> DictWrapper
    """Return a new dict made of self updated with `other`.

    Neither operand is modified; `other`'s values win on key clashes.

    Example:

        >>> from ww import d
        >>> d({1: 1, 3: 3}) + {3: 4, 4: 5}
        {1: 1, 3: 4, 4: 5}
    """
    merged = self.__class__(self.copy())
    merged.merge(other)
    return merged
def __radd__(self, other):
    # type: (dict) -> DictWrapper
    """Support `plain_dict + wrapped_dict`.

    Returns a copy of `other` updated with self; self's values win on
    key clashes. Neither operand is modified.
    """
    merged = self.__class__(other.copy())
    merged.merge(self)
    return merged
def __iadd__(self, other):
    # type: (dict) -> DictWrapper
    """In-place `+=`: merge `other` into self and return self.

    Example:

        >>> from ww import d
        >>> current = d({1: 1})
        >>> current += {5: 6}
        >>> current
        {1: 1, 5: 6}
    """
    self.merge(other)
    return self
|
Tygs/ww | src/ww/wrappers/dicts.py | DictWrapper.fromkeys | python | def fromkeys(cls, iterable, value=None):
# TODO : type: (Iterable, Union[Any, Callable]) -> DictWrapper
# https://github.com/python/mypy/issues/2254
if not callable(value):
return cls(dict.fromkeys(iterable, value))
return cls((key, value(key)) for key in iterable) | Create a new d from
Args:
iterable: Iterable containing keys
value: value to associate with each key.
If callable, will be value[key]
Returns: new DictWrapper
Example:
>>> from ww import d
>>> sorted(d.fromkeys('123', value=4).items())
[('1', 4), ('2', 4), ('3', 4)]
>>> sorted(d.fromkeys(range(3), value=lambda e:e**2).items())
[(0, 0), (1, 1), (2, 4)] | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/wrappers/dicts.py#L94-L117 | null | class DictWrapper(dict):
def isubset(self, *keys):
# type: (*Hashable) -> ww.g
"""Return key, self[key] as generator for key in keys.
Raise KeyError if a key does not exist
Args:
keys: Iterable containing keys
Example:
>>> from ww import d
>>> list(d({1: 1, 2: 2, 3: 3}).isubset(1, 3))
[(1, 1), (3, 3)]
"""
return ww.g((key, self[key]) for key in keys)
def subset(self, *keys):
# type: (*Hashable) -> DictWrapper
"""Return d(key, self[key]) for key in keys.
Raise KeyError if a key does not exist
Args:
keys: Iterable containing keys
Example:
>>> from ww import d
>>> d({1: 1, 2: 2, 3: 3}).subset(1,3)
{1: 1, 3: 3}
"""
return self.__class__(self.isubset(*keys))
def swap(self):
# type: () -> DictWrapper
"""Swap key and value
/!\ Be carreful, if there are duplicate values, only one will
survive /!\
Example:
>>> from ww import d
>>> d({1: 2, 2: 2, 3: 3}).swap()
{2: 2, 3: 3}
"""
return self.__class__((v, k) for k, v in self.items())
def add(self, key, value):
# type: (Hashable, Any) -> DictWrapper
"""Add value in key
Args:
key: Key
value: Value to associate to the key
Example:
>>> from ww import d
>>> current_dict = d({1: 1, 2: 2, 3: 3})
>>> current_dict.add(4, 5)
{1: 1, 2: 2, 3: 3, 4: 5}
"""
self[key] = value
return self
def __iter__(self):
"""Return the inner iterator
Returns: iterator
"""
return iter(ww.g(self.items()))
@classmethod
def merge(self, other_dict):
# type: (dict) -> DictWrapper
"""Merge self with other_dict
Args:
other_dict: dict to merge with self
Returns: merged dict
Example:
>>> from ww import d
>>> current_dict = d({1: 1, 2: 2, 3: 3})
>>> to_merge_dict = d({3: 4, 4: 5})
>>> current_dict.merge(to_merge_dict)
{1: 1, 2: 2, 3: 4, 4: 5}
"""
self.update(other_dict)
return self
def delete(self, *keys):
# type: (*Hashable) -> DictWrapper
"""Delete keys from dict
Args:
*keys: Iterable containing keys to delete
Returns: self
Example:
>>> from ww import d
>>> current_dict = d({1: 1, 2: 2, 3: 3})
>>> current_dict.delete(*[1,2])
{3: 3}
"""
for key in keys:
self.pop(key, None)
return self
def __add__(self, other):
# type: (dict) -> DictWrapper
"""Add other in self and return new dict
Args:
other: dict to add in self
Returns: Merged dict
Example:
>>> from ww import d
>>> current_dict = d({1: 1, 2: 2, 3: 3})
>>> to_merge_dict = {3: 4, 4: 5}
>>> current_dict + to_merge_dict
{1: 1, 2: 2, 3: 4, 4: 5}
"""
copy = self.__class__(self.copy())
return copy.merge(other)
def __radd__(self, other):
# type: (dict) -> DictWrapper
"""Add other in self, and return new dict
Args:
other: dict to add in self
Returns: Merged dict
Example:
>>> from ww import d
>>> current_dict = {1: 1, 2: 2, 3: 3}
>>> to_merge_dict = d({3: 4, 4: 5})
>>> current_dict + to_merge_dict
{1: 1, 2: 2, 3: 4, 4: 5}
"""
copy = self.__class__(other.copy())
return copy.merge(self)
def __iadd__(self, other):
# type: (dict) -> DictWrapper
"""Add other in self
Args:
other: Dict to add in self
Returns: Merged dict
Example:
>>> from ww import d
>>> current_dict = d({1: 1, 2: 2, 3: 3})
>>> current_dict += {5: 6, 6: 7}
>>> current_dict
{1: 1, 2: 2, 3: 3, 5: 6, 6: 7}
"""
return self.merge(other)
|
Tygs/ww | src/ww/tools/strings.py | multisplit | python | def multisplit(string, # type: unicode
*separators, # type: unicode
**kwargs # type: Union[unicode, C[..., I[unicode]]]
): # type: (...) -> I
cast = kwargs.pop('cast', list)
flags = parse_re_flags(kwargs.get('flags', 0))
# 0 means "no limit" for re.split
maxsplit = require_positive_number(kwargs.get('maxsplit', 0),
'maxsplit')
# no separator means we use the default unicode.split behavior
if not separators:
if flags:
raise ValueError(ww.s >> """
You can't pass flags without passing
a separator. Flags only have sense if
you split using a regex.
""")
maxsplit = maxsplit or -1 # -1 means "no limit" for unicode.split
return unicode.split(string, None, maxsplit)
# Check that all separators are strings
for i, sep in enumerate(separators):
if not isinstance(sep, unicode):
raise TypeError(ww.s >> """
'{!r}', the separator at index '{}', is of type '{}'.
multisplit() only accepts unicode strings.
""".format(sep, i, type(sep)))
# TODO: split let many empty strings in the result. Fix it.
seps = list(separators) # cast to list so we can slice it
# simple code for when you need to split the whole string
if maxsplit == 0:
return cast(_split(string, seps, flags))
# slow implementation with checks for recursive maxsplit
return cast(_split_with_max(string, seps, maxsplit, flags)) | Like unicode.split, but accept several separators and regexes
Args:
string: the string to split.
separators: strings you can split on. Each string can be a
regex.
maxsplit: max number of time you wish to split. default is 0,
which means no limit.
flags: flags you wish to pass if you use regexes. You should
pass them as a string containing a combination of:
- 'm' for re.MULTILINE
- 'x' for re.VERBOSE
- 'v' for re.VERBOSE
- 's' for re.DOTALL
- '.' for re.DOTALL
- 'd' for re.DEBUG
- 'i' for re.IGNORECASE
- 'u' for re.UNICODE
- 'l' for re.LOCALE
cast: what to cast the result to
Returns:
An iterable of substrings.
Raises:
ValueError: if you pass a flag without separators.
TypeError: if you pass something else than unicode strings.
Example:
>>> for word in multisplit(u'fat black cat, big'): print(word)
fat
black
cat,
big
>>> string = u'a,b;c/d=a,b;c/d'
>>> chunks = multisplit(string, u',', u';', u'[/=]', maxsplit=4)
>>> for chunk in chunks: print(chunk)
a
b
c
d
a,b;c/d | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/tools/strings.py#L77-L163 | [
"def require_positive_number(number, name,\n tpl='{} must be a positive number or 0, not \"{}\"'):\n try:\n number = int(number)\n except (ValueError, TypeError):\n raise ValueError(tpl.format(name, number))\n\n if number < 0:\n raise ValueError(tpl.format(name, number))\n\n return number\n",
"def parse_re_flags(flags):\n bflags = 0\n if isinstance(flags, basestring):\n for flag in flags:\n bflags |= REGEX_FLAGS[flag]\n\n return bflags\n\n return flags\n"
] | # coding: utf-8
"""
:doc:`s() </string_wrapper>` is very convenient, but it's only a
thin wrapper on top of regular strings and the tools from this module.
So if you want to apply some of the goodies from it without having to
turn your strings into StringWrapper objects, you can use the functions
from this module directly.
They don't accept bytes as an input. If you do so and it works, you must
know it's not a supported behavior and may change in the future. Only
pass:
- unicode objects in Python 2;
- str objects in Python 3.
Example:
>>> from ww.tools.strings import multisplit # same as s().split()
>>> string = u'a,b;c/d=a,b;c/d'
>>> chunks = multisplit(string, u',', u';', u'[/=]', maxsplit=4)
>>> for chunk in chunks: print(chunk)
a
b
c
d
a,b;c/d
You'll find bellow the detailed documentation for each functions of.
Go have a look, there is some great stuff here!
"""
from __future__ import absolute_import, division, print_function
import re
from past.builtins import basestring
import ww
from ww.utils import require_positive_number, ensure_tuple
from ww.types import unicode, str_istr, str_istr_icallable, C, I # noqa
REGEX_FLAGS = {
'm': re.MULTILINE,
'x': re.VERBOSE,
'v': re.VERBOSE,
's': re.DOTALL,
'.': re.DOTALL,
'd': re.DEBUG,
'i': re.IGNORECASE,
'u': re.UNICODE,
'l': re.LOCALE,
}
try:
# Python2 doesn't support re.ASCII flag
REGEX_FLAGS['a'] = re.ASCII
except AttributeError: # pragma: no cover
pass
def parse_re_flags(flags):
bflags = 0
if isinstance(flags, basestring):
for flag in flags:
bflags |= REGEX_FLAGS[flag]
return bflags
return flags
# kwargs allow compatibiliy with 2.7 and 3 since you can't use
# keyword-only arguments in python 2
# TODO: remove empty strings
def _split(string, separators, flags=0):
try:
sep = separators.pop()
except IndexError:
yield string
else:
# recursive split until we got the smallest chunks
for chunk in re.split(sep, string, flags=flags):
for item in _split(chunk, separators, flags=flags):
yield item
def _split_with_max(string, separators, maxsplit, flags=0):
try:
sep = separators.pop()
except IndexError:
yield string
else:
while True:
if maxsplit <= 0:
yield string
break
# we split only in 2, then recursively head first to get the rest
res = re.split(sep, string, maxsplit=1, flags=flags)
if len(res) < 2:
yield string
# Nothing to split anymore, we never reached maxsplit but we
# can exit anyway
break
head, tail = res
chunks = _split_with_max(head, separators,
maxsplit=maxsplit, flags=flags)
for chunk in chunks:
# remove chunks from maxsplit
yield chunk
maxsplit -= 1
string = tail
def multireplace(string, # type: unicode
patterns, # type: str_or_str_iterable
substitutions, # type: str_istr_icallable
maxreplace=0, # type: int
flags=0 # type: unicode
): # type: (...) -> bool
""" Like unicode.replace() but accept several substitutions and regexes
Args:
string: the string to split on.
patterns: a string, or an iterable of strings to be replaced.
substitutions: a string or an iterable of string to use as a
replacement. You can pass either one string, or
an iterable containing the same number of
sustitutions that you passed as patterns. You can
also pass a callable instead of a string. It
should expact a match object as a parameter.
maxreplace: the max number of replacement to make. 0 is no limit,
which is the default.
flags: flags you wish to pass if you use regexes. You should
pass them as a string containing a combination of:
- 'm' for re.MULTILINE
- 'x' for re.VERBOSE
- 'v' for re.VERBOSE
- 's' for re.DOTALL
- '.' for re.DOTALL
- 'd' for re.DEBUG
- 'i' for re.IGNORECASE
- 'u' for re.UNICODE
- 'l' for re.LOCALE
Returns:
The string with replaced bits.
Raises:
ValueError: if you pass the wrong number of substitution.
Example:
>>> print(multireplace(u'a,b;c/d', (u',', u';', u'/'), u','))
a,b,c,d
>>> print(multireplace(u'a1b33c-d', u'\d+', u','))
a,b,c-d
>>> print(multireplace(u'a-1,b-3,3c-d', u',|-', u'', maxreplace=3))
a1b3,3c-d
>>> def upper(match):
... return match.group().upper()
...
>>> print(multireplace(u'a-1,b-3,3c-d', u'[ab]', upper))
A-1,B-3,3c-d
"""
# we can pass either a string or an iterable of strings
patterns = ensure_tuple(patterns)
substitutions = ensure_tuple(substitutions)
# you can either have:
# - many patterns, one substitution
# - many patterns, exactly as many substitutions
# anything else is an error
num_of_subs = len(substitutions)
num_of_patterns = len(patterns)
if num_of_subs == 1 and num_of_patterns > 0:
substitutions *= num_of_patterns
elif len(patterns) != num_of_subs:
raise ValueError("You must have exactly one substitution "
"for each pattern or only one substitution")
flags = parse_re_flags(flags)
# no limit for replacing, use a simple code
if not maxreplace:
for pattern, sub in zip(patterns, substitutions):
string, count = re.subn(pattern, sub, string, flags=flags)
return string
# ensure we respect the max number of replace accross substitutions
for pattern, sub in zip(patterns, substitutions):
string, count = re.subn(pattern, sub, string,
count=maxreplace, flags=flags)
maxreplace -= count
if maxreplace == 0:
break
return string
|
Tygs/ww | src/ww/tools/strings.py | multireplace | python | def multireplace(string, # type: unicode
patterns, # type: str_or_str_iterable
substitutions, # type: str_istr_icallable
maxreplace=0, # type: int
flags=0 # type: unicode
): # type: (...) -> bool
# we can pass either a string or an iterable of strings
patterns = ensure_tuple(patterns)
substitutions = ensure_tuple(substitutions)
# you can either have:
# - many patterns, one substitution
# - many patterns, exactly as many substitutions
# anything else is an error
num_of_subs = len(substitutions)
num_of_patterns = len(patterns)
if num_of_subs == 1 and num_of_patterns > 0:
substitutions *= num_of_patterns
elif len(patterns) != num_of_subs:
raise ValueError("You must have exactly one substitution "
"for each pattern or only one substitution")
flags = parse_re_flags(flags)
# no limit for replacing, use a simple code
if not maxreplace:
for pattern, sub in zip(patterns, substitutions):
string, count = re.subn(pattern, sub, string, flags=flags)
return string
# ensure we respect the max number of replace accross substitutions
for pattern, sub in zip(patterns, substitutions):
string, count = re.subn(pattern, sub, string,
count=maxreplace, flags=flags)
maxreplace -= count
if maxreplace == 0:
break
return string | Like unicode.replace() but accept several substitutions and regexes
Args:
string: the string to split on.
patterns: a string, or an iterable of strings to be replaced.
substitutions: a string or an iterable of string to use as a
replacement. You can pass either one string, or
an iterable containing the same number of
sustitutions that you passed as patterns. You can
also pass a callable instead of a string. It
should expact a match object as a parameter.
maxreplace: the max number of replacement to make. 0 is no limit,
which is the default.
flags: flags you wish to pass if you use regexes. You should
pass them as a string containing a combination of:
- 'm' for re.MULTILINE
- 'x' for re.VERBOSE
- 'v' for re.VERBOSE
- 's' for re.DOTALL
- '.' for re.DOTALL
- 'd' for re.DEBUG
- 'i' for re.IGNORECASE
- 'u' for re.UNICODE
- 'l' for re.LOCALE
Returns:
The string with replaced bits.
Raises:
ValueError: if you pass the wrong number of substitution.
Example:
>>> print(multireplace(u'a,b;c/d', (u',', u';', u'/'), u','))
a,b,c,d
>>> print(multireplace(u'a1b33c-d', u'\d+', u','))
a,b,c-d
>>> print(multireplace(u'a-1,b-3,3c-d', u',|-', u'', maxreplace=3))
a1b3,3c-d
>>> def upper(match):
... return match.group().upper()
...
>>> print(multireplace(u'a-1,b-3,3c-d', u'[ab]', upper))
A-1,B-3,3c-d | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/tools/strings.py#L214-L300 | [
"def ensure_tuple(val):\n if not isinstance(val, basestring):\n try:\n return tuple(val)\n except TypeError:\n return (val,)\n return (val,)\n",
"def parse_re_flags(flags):\n bflags = 0\n if isinstance(flags, basestring):\n for flag in flags:\n bflags |= REGEX_FLAGS[flag]\n\n return bflags\n\n return flags\n"
] | # coding: utf-8
"""
:doc:`s() </string_wrapper>` is very convenient, but it's only a
thin wrapper on top of regular strings and the tools from this module.
So if you want to apply some of the goodies from it without having to
turn your strings into StringWrapper objects, you can use the functions
from this module directly.
They don't accept bytes as an input. If you do so and it works, you must
know it's not a supported behavior and may change in the future. Only
pass:
- unicode objects in Python 2;
- str objects in Python 3.
Example:
>>> from ww.tools.strings import multisplit # same as s().split()
>>> string = u'a,b;c/d=a,b;c/d'
>>> chunks = multisplit(string, u',', u';', u'[/=]', maxsplit=4)
>>> for chunk in chunks: print(chunk)
a
b
c
d
a,b;c/d
You'll find bellow the detailed documentation for each functions of.
Go have a look, there is some great stuff here!
"""
from __future__ import absolute_import, division, print_function
import re
from past.builtins import basestring
import ww
from ww.utils import require_positive_number, ensure_tuple
from ww.types import unicode, str_istr, str_istr_icallable, C, I # noqa
REGEX_FLAGS = {
'm': re.MULTILINE,
'x': re.VERBOSE,
'v': re.VERBOSE,
's': re.DOTALL,
'.': re.DOTALL,
'd': re.DEBUG,
'i': re.IGNORECASE,
'u': re.UNICODE,
'l': re.LOCALE,
}
try:
# Python2 doesn't support re.ASCII flag
REGEX_FLAGS['a'] = re.ASCII
except AttributeError: # pragma: no cover
pass
def parse_re_flags(flags):
bflags = 0
if isinstance(flags, basestring):
for flag in flags:
bflags |= REGEX_FLAGS[flag]
return bflags
return flags
# kwargs allow compatibiliy with 2.7 and 3 since you can't use
# keyword-only arguments in python 2
# TODO: remove empty strings
def multisplit(string, # type: unicode
*separators, # type: unicode
**kwargs # type: Union[unicode, C[..., I[unicode]]]
): # type: (...) -> I
""" Like unicode.split, but accept several separators and regexes
Args:
string: the string to split.
separators: strings you can split on. Each string can be a
regex.
maxsplit: max number of time you wish to split. default is 0,
which means no limit.
flags: flags you wish to pass if you use regexes. You should
pass them as a string containing a combination of:
- 'm' for re.MULTILINE
- 'x' for re.VERBOSE
- 'v' for re.VERBOSE
- 's' for re.DOTALL
- '.' for re.DOTALL
- 'd' for re.DEBUG
- 'i' for re.IGNORECASE
- 'u' for re.UNICODE
- 'l' for re.LOCALE
cast: what to cast the result to
Returns:
An iterable of substrings.
Raises:
ValueError: if you pass a flag without separators.
TypeError: if you pass something else than unicode strings.
Example:
>>> for word in multisplit(u'fat black cat, big'): print(word)
fat
black
cat,
big
>>> string = u'a,b;c/d=a,b;c/d'
>>> chunks = multisplit(string, u',', u';', u'[/=]', maxsplit=4)
>>> for chunk in chunks: print(chunk)
a
b
c
d
a,b;c/d
"""
cast = kwargs.pop('cast', list)
flags = parse_re_flags(kwargs.get('flags', 0))
# 0 means "no limit" for re.split
maxsplit = require_positive_number(kwargs.get('maxsplit', 0),
'maxsplit')
# no separator means we use the default unicode.split behavior
if not separators:
if flags:
raise ValueError(ww.s >> """
You can't pass flags without passing
a separator. Flags only have sense if
you split using a regex.
""")
maxsplit = maxsplit or -1 # -1 means "no limit" for unicode.split
return unicode.split(string, None, maxsplit)
# Check that all separators are strings
for i, sep in enumerate(separators):
if not isinstance(sep, unicode):
raise TypeError(ww.s >> """
'{!r}', the separator at index '{}', is of type '{}'.
multisplit() only accepts unicode strings.
""".format(sep, i, type(sep)))
# TODO: split let many empty strings in the result. Fix it.
seps = list(separators) # cast to list so we can slice it
# simple code for when you need to split the whole string
if maxsplit == 0:
return cast(_split(string, seps, flags))
# slow implementation with checks for recursive maxsplit
return cast(_split_with_max(string, seps, maxsplit, flags))
def _split(string, separators, flags=0):
try:
sep = separators.pop()
except IndexError:
yield string
else:
# recursive split until we got the smallest chunks
for chunk in re.split(sep, string, flags=flags):
for item in _split(chunk, separators, flags=flags):
yield item
def _split_with_max(string, separators, maxsplit, flags=0):
try:
sep = separators.pop()
except IndexError:
yield string
else:
while True:
if maxsplit <= 0:
yield string
break
# we split only in 2, then recursively head first to get the rest
res = re.split(sep, string, maxsplit=1, flags=flags)
if len(res) < 2:
yield string
# Nothing to split anymore, we never reached maxsplit but we
# can exit anyway
break
head, tail = res
chunks = _split_with_max(head, separators,
maxsplit=maxsplit, flags=flags)
for chunk in chunks:
# remove chunks from maxsplit
yield chunk
maxsplit -= 1
string = tail
|
Tygs/ww | src/ww/wrappers/tuples.py | TupleWrapper.index | python | def index(self, value):
for i, x in enumerate(self):
if x == value:
return i
raise ValueError("{} is not in list".format(value)) | Args:
value: index
Returns: index of the values
Raises:
ValueError: value is not in list | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/wrappers/tuples.py#L33-L48 | null | class TupleWrapper(tuple):
@property
def len(self):
return len(self)
def join(self, joiner, formatter=lambda s, t: t.format(s),
template="{}"):
"""Join values and convert to string
Example:
>>> from ww import t
>>> lst = t('012')
>>> lst.join(',')
u'0,1,2'
>>> lst.join(',', template="{}#")
u'0#,1#,2#'
>>> string = lst.join(',',\
formatter = lambda x, y: str(int(x) ** 2))
>>> string
u'0,1,4'
"""
return ww.s(joiner).join(self, formatter, template)
def to_l(self):
"""
Args: self
Returns: a l that stems from self
"""
return ww.l(self)
# TODO: rename this method to dict()
def to_d(self):
"""
Args: self
Returns: a d that stems from self
Raises:
ValueError: dictionary update sequence element #index has length
len(tuple); 2 is required
TypeError: cannot convert dictionary update sequence element
#index to a sequence
"""
try:
return ww.d(self)
except (TypeError, ValueError):
for i, element in enumerate(self):
try:
iter(element)
# TODO: find out why we can't cover this branch. The code
# is tested but don't appear in coverage
except TypeError: # pragma: no cover
# TODO: use raise_from ?
raise ValueError(("'{}' (position {}) is not iterable. You"
" can only create a dictionary from a "
"elements that are iterables, such as "
"tuples, lists, etc.")
.format(element, i))
try:
size = len(element)
except TypeError: # ignore generators, it's already consummed
pass
else:
raise ValueError(("'{}' (position {}) contains {} "
"elements. You can only create a "
"dictionary from iterables containing "
"2 elements.").format(element, i, size))
raise
|
Tygs/ww | src/ww/wrappers/tuples.py | TupleWrapper.to_d | python | def to_d(self):
try:
return ww.d(self)
except (TypeError, ValueError):
for i, element in enumerate(self):
try:
iter(element)
# TODO: find out why we can't cover this branch. The code
# is tested but don't appear in coverage
except TypeError: # pragma: no cover
# TODO: use raise_from ?
raise ValueError(("'{}' (position {}) is not iterable. You"
" can only create a dictionary from a "
"elements that are iterables, such as "
"tuples, lists, etc.")
.format(element, i))
try:
size = len(element)
except TypeError: # ignore generators, it's already consummed
pass
else:
raise ValueError(("'{}' (position {}) contains {} "
"elements. You can only create a "
"dictionary from iterables containing "
"2 elements.").format(element, i, size))
raise | Args: self
Returns: a d that stems from self
Raises:
ValueError: dictionary update sequence element #index has length
len(tuple); 2 is required
TypeError: cannot convert dictionary update sequence element
#index to a sequence | train | https://github.com/Tygs/ww/blob/6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4/src/ww/wrappers/tuples.py#L59-L101 | null | class TupleWrapper(tuple):
@property
def len(self):
return len(self)
def join(self, joiner, formatter=lambda s, t: t.format(s),
template="{}"):
"""Join values and convert to string
Example:
>>> from ww import t
>>> lst = t('012')
>>> lst.join(',')
u'0,1,2'
>>> lst.join(',', template="{}#")
u'0#,1#,2#'
>>> string = lst.join(',',\
formatter = lambda x, y: str(int(x) ** 2))
>>> string
u'0,1,4'
"""
return ww.s(joiner).join(self, formatter, template)
def index(self, value):
"""
Args:
value: index
Returns: index of the values
Raises:
ValueError: value is not in list
"""
for i, x in enumerate(self):
if x == value:
return i
raise ValueError("{} is not in list".format(value))
def to_l(self):
"""
Args: self
Returns: a l that stems from self
"""
return ww.l(self)
# TODO: rename this method to dict()
|
kensho-technologies/grift | grift/config.py | ConfigProperty.load | python | def load(self, value):
if self.property_type is None:
return value
elif not isinstance(self.property_type, BaseType):
raise TypeError('property_type must be schematics BaseType')
else:
native_value = self.property_type.to_native(value)
self.property_type.validate(native_value)
return native_value | Load a value, converting it to the proper type if validation_type exists. | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/config.py#L35-L44 | null | class ConfigProperty(object):
def __init__(self, property_key=None, property_type=None, required=True, default=None,
exclude_from_varz=False):
"""Define the schema for a property of a BaseConfig subclass
Args:
property_key: None | string
Key of the property to get from a loader. Defaults to name of the attribute name in
the BaseConfig class.
property_type: None | schematics BaseType
If property_type is specified, the loaded value is passed through to_native()
and validate(). If the value is invalid, an exception is raised.
If property_type is None (default), conversion and validation steps are skipped.
required: bool
True if the Config should require a value for this property. If a default value is
specified to be anything except None, the requirement is always satisfied (i.e.
this is effectively False).
default: default value
exclude_from_varz: bool
Determines if the property should be included in the BaseConfig's varz dict. If
True, the value is not added.
"""
self.property_key = property_key
self.property_type = property_type
self.required = required
self.default = default
self.exclude_from_varz = exclude_from_varz
|
kensho-technologies/grift | grift/config.py | BaseConfig._iter_config_props | python | def _iter_config_props(cls):
props = inspect.getmembers(cls, lambda a: isinstance(a, ConfigProperty))
for attr_name, config_prop in props:
yield attr_name, config_prop | Iterate over all ConfigProperty attributes, yielding (attr_name, config_property) | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/config.py#L70-L74 | null | class BaseConfig(object):
"""Base class to hold configuration settings"""
def __init__(self, loaders):
"""Load values into the class's ConfigProperty attributes (validating types if possible)
Args:
loaders: iterable of AbstractLoader instances
ConfigProperty values are loaded from these sources; and the order indicates
preference.
"""
if not loaders:
# Require loaders only if the class has ConfigProperty attributes
if any(self._iter_config_props()):
raise AssertionError('Class has ConfigProperty attributes: must provide loader(s)')
self._update_property_keys()
self.varz = {}
self._loaders = loaders
self._load()
@classmethod
@classmethod
def _update_property_keys(cls):
"""Set unspecified property_keys for each ConfigProperty to the name of the class attr"""
for attr_name, config_prop in cls._iter_config_props():
if config_prop.property_key is None:
config_prop.property_key = attr_name
def _set_instance_prop(self, attr_name, config_prop, value):
"""Set instance property to a value and add it varz if needed"""
setattr(self, attr_name, value)
# add to varz if it is not private
if not config_prop.exclude_from_varz:
self.varz[attr_name] = value
def _load(self):
"""Load values for all ConfigProperty attributes"""
for attr_name, config_prop in self._iter_config_props():
found = False
for loader in self._loaders:
if loader.exists(config_prop.property_key):
raw_value = loader.get(config_prop.property_key)
converted_value = config_prop.load(raw_value)
self._set_instance_prop(attr_name, config_prop, converted_value)
found = True
break
if not found:
if not config_prop.required or config_prop.default is not None:
self._set_instance_prop(attr_name, config_prop, config_prop.default)
else:
raise ValueError('Missing required ConfigProperty {}'.format(attr_name))
def reload(self):
"""Reload all ConfigProperty values (reading from loader sources again, if applicable)"""
for loader in self._loaders:
loader.reload()
self.varz = {} # reset varz
self._load()
def as_dict(self):
"""Return all properties and values in a dictionary (includes private properties)"""
return {config_name: getattr(self, config_name)
for config_name, _ in self._iter_config_props()}
|
kensho-technologies/grift | grift/config.py | BaseConfig._update_property_keys | python | def _update_property_keys(cls):
for attr_name, config_prop in cls._iter_config_props():
if config_prop.property_key is None:
config_prop.property_key = attr_name | Set unspecified property_keys for each ConfigProperty to the name of the class attr | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/config.py#L77-L81 | [
"def _iter_config_props(cls):\n \"\"\"Iterate over all ConfigProperty attributes, yielding (attr_name, config_property) \"\"\"\n props = inspect.getmembers(cls, lambda a: isinstance(a, ConfigProperty))\n for attr_name, config_prop in props:\n yield attr_name, config_prop\n"
] | class BaseConfig(object):
"""Base class to hold configuration settings"""
def __init__(self, loaders):
"""Load values into the class's ConfigProperty attributes (validating types if possible)
Args:
loaders: iterable of AbstractLoader instances
ConfigProperty values are loaded from these sources; and the order indicates
preference.
"""
if not loaders:
# Require loaders only if the class has ConfigProperty attributes
if any(self._iter_config_props()):
raise AssertionError('Class has ConfigProperty attributes: must provide loader(s)')
self._update_property_keys()
self.varz = {}
self._loaders = loaders
self._load()
@classmethod
def _iter_config_props(cls):
"""Iterate over all ConfigProperty attributes, yielding (attr_name, config_property) """
props = inspect.getmembers(cls, lambda a: isinstance(a, ConfigProperty))
for attr_name, config_prop in props:
yield attr_name, config_prop
@classmethod
def _set_instance_prop(self, attr_name, config_prop, value):
"""Set instance property to a value and add it varz if needed"""
setattr(self, attr_name, value)
# add to varz if it is not private
if not config_prop.exclude_from_varz:
self.varz[attr_name] = value
def _load(self):
"""Load values for all ConfigProperty attributes"""
for attr_name, config_prop in self._iter_config_props():
found = False
for loader in self._loaders:
if loader.exists(config_prop.property_key):
raw_value = loader.get(config_prop.property_key)
converted_value = config_prop.load(raw_value)
self._set_instance_prop(attr_name, config_prop, converted_value)
found = True
break
if not found:
if not config_prop.required or config_prop.default is not None:
self._set_instance_prop(attr_name, config_prop, config_prop.default)
else:
raise ValueError('Missing required ConfigProperty {}'.format(attr_name))
def reload(self):
"""Reload all ConfigProperty values (reading from loader sources again, if applicable)"""
for loader in self._loaders:
loader.reload()
self.varz = {} # reset varz
self._load()
def as_dict(self):
"""Return all properties and values in a dictionary (includes private properties)"""
return {config_name: getattr(self, config_name)
for config_name, _ in self._iter_config_props()}
|
kensho-technologies/grift | grift/config.py | BaseConfig._set_instance_prop | python | def _set_instance_prop(self, attr_name, config_prop, value):
setattr(self, attr_name, value)
# add to varz if it is not private
if not config_prop.exclude_from_varz:
self.varz[attr_name] = value | Set instance property to a value and add it varz if needed | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/config.py#L83-L89 | null | class BaseConfig(object):
"""Base class to hold configuration settings"""
def __init__(self, loaders):
"""Load values into the class's ConfigProperty attributes (validating types if possible)
Args:
loaders: iterable of AbstractLoader instances
ConfigProperty values are loaded from these sources; and the order indicates
preference.
"""
if not loaders:
# Require loaders only if the class has ConfigProperty attributes
if any(self._iter_config_props()):
raise AssertionError('Class has ConfigProperty attributes: must provide loader(s)')
self._update_property_keys()
self.varz = {}
self._loaders = loaders
self._load()
@classmethod
def _iter_config_props(cls):
"""Iterate over all ConfigProperty attributes, yielding (attr_name, config_property) """
props = inspect.getmembers(cls, lambda a: isinstance(a, ConfigProperty))
for attr_name, config_prop in props:
yield attr_name, config_prop
@classmethod
def _update_property_keys(cls):
"""Set unspecified property_keys for each ConfigProperty to the name of the class attr"""
for attr_name, config_prop in cls._iter_config_props():
if config_prop.property_key is None:
config_prop.property_key = attr_name
def _load(self):
"""Load values for all ConfigProperty attributes"""
for attr_name, config_prop in self._iter_config_props():
found = False
for loader in self._loaders:
if loader.exists(config_prop.property_key):
raw_value = loader.get(config_prop.property_key)
converted_value = config_prop.load(raw_value)
self._set_instance_prop(attr_name, config_prop, converted_value)
found = True
break
if not found:
if not config_prop.required or config_prop.default is not None:
self._set_instance_prop(attr_name, config_prop, config_prop.default)
else:
raise ValueError('Missing required ConfigProperty {}'.format(attr_name))
def reload(self):
"""Reload all ConfigProperty values (reading from loader sources again, if applicable)"""
for loader in self._loaders:
loader.reload()
self.varz = {} # reset varz
self._load()
def as_dict(self):
"""Return all properties and values in a dictionary (includes private properties)"""
return {config_name: getattr(self, config_name)
for config_name, _ in self._iter_config_props()}
|
kensho-technologies/grift | grift/config.py | BaseConfig._load | python | def _load(self):
for attr_name, config_prop in self._iter_config_props():
found = False
for loader in self._loaders:
if loader.exists(config_prop.property_key):
raw_value = loader.get(config_prop.property_key)
converted_value = config_prop.load(raw_value)
self._set_instance_prop(attr_name, config_prop, converted_value)
found = True
break
if not found:
if not config_prop.required or config_prop.default is not None:
self._set_instance_prop(attr_name, config_prop, config_prop.default)
else:
raise ValueError('Missing required ConfigProperty {}'.format(attr_name)) | Load values for all ConfigProperty attributes | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/config.py#L91-L108 | [
"def _iter_config_props(cls):\n \"\"\"Iterate over all ConfigProperty attributes, yielding (attr_name, config_property) \"\"\"\n props = inspect.getmembers(cls, lambda a: isinstance(a, ConfigProperty))\n for attr_name, config_prop in props:\n yield attr_name, config_prop\n",
"def _set_instance_prop(self, attr_name, config_prop, value):\n \"\"\"Set instance property to a value and add it varz if needed\"\"\"\n setattr(self, attr_name, value)\n\n # add to varz if it is not private\n if not config_prop.exclude_from_varz:\n self.varz[attr_name] = value\n"
] | class BaseConfig(object):
"""Base class to hold configuration settings"""
def __init__(self, loaders):
"""Load values into the class's ConfigProperty attributes (validating types if possible)
Args:
loaders: iterable of AbstractLoader instances
ConfigProperty values are loaded from these sources; and the order indicates
preference.
"""
if not loaders:
# Require loaders only if the class has ConfigProperty attributes
if any(self._iter_config_props()):
raise AssertionError('Class has ConfigProperty attributes: must provide loader(s)')
self._update_property_keys()
self.varz = {}
self._loaders = loaders
self._load()
@classmethod
def _iter_config_props(cls):
"""Iterate over all ConfigProperty attributes, yielding (attr_name, config_property) """
props = inspect.getmembers(cls, lambda a: isinstance(a, ConfigProperty))
for attr_name, config_prop in props:
yield attr_name, config_prop
@classmethod
def _update_property_keys(cls):
"""Set unspecified property_keys for each ConfigProperty to the name of the class attr"""
for attr_name, config_prop in cls._iter_config_props():
if config_prop.property_key is None:
config_prop.property_key = attr_name
def _set_instance_prop(self, attr_name, config_prop, value):
"""Set instance property to a value and add it varz if needed"""
setattr(self, attr_name, value)
# add to varz if it is not private
if not config_prop.exclude_from_varz:
self.varz[attr_name] = value
def reload(self):
"""Reload all ConfigProperty values (reading from loader sources again, if applicable)"""
for loader in self._loaders:
loader.reload()
self.varz = {} # reset varz
self._load()
def as_dict(self):
"""Return all properties and values in a dictionary (includes private properties)"""
return {config_name: getattr(self, config_name)
for config_name, _ in self._iter_config_props()}
|
kensho-technologies/grift | grift/config.py | BaseConfig.reload | python | def reload(self):
for loader in self._loaders:
loader.reload()
self.varz = {} # reset varz
self._load() | Reload all ConfigProperty values (reading from loader sources again, if applicable) | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/config.py#L110-L116 | [
"def _load(self):\n \"\"\"Load values for all ConfigProperty attributes\"\"\"\n for attr_name, config_prop in self._iter_config_props():\n found = False\n for loader in self._loaders:\n if loader.exists(config_prop.property_key):\n raw_value = loader.get(config_prop.property_key)\n converted_value = config_prop.load(raw_value)\n\n self._set_instance_prop(attr_name, config_prop, converted_value)\n found = True\n break\n\n if not found:\n if not config_prop.required or config_prop.default is not None:\n self._set_instance_prop(attr_name, config_prop, config_prop.default)\n else:\n raise ValueError('Missing required ConfigProperty {}'.format(attr_name))\n"
] | class BaseConfig(object):
"""Base class to hold configuration settings"""
def __init__(self, loaders):
"""Load values into the class's ConfigProperty attributes (validating types if possible)
Args:
loaders: iterable of AbstractLoader instances
ConfigProperty values are loaded from these sources; and the order indicates
preference.
"""
if not loaders:
# Require loaders only if the class has ConfigProperty attributes
if any(self._iter_config_props()):
raise AssertionError('Class has ConfigProperty attributes: must provide loader(s)')
self._update_property_keys()
self.varz = {}
self._loaders = loaders
self._load()
@classmethod
def _iter_config_props(cls):
"""Iterate over all ConfigProperty attributes, yielding (attr_name, config_property) """
props = inspect.getmembers(cls, lambda a: isinstance(a, ConfigProperty))
for attr_name, config_prop in props:
yield attr_name, config_prop
@classmethod
def _update_property_keys(cls):
"""Set unspecified property_keys for each ConfigProperty to the name of the class attr"""
for attr_name, config_prop in cls._iter_config_props():
if config_prop.property_key is None:
config_prop.property_key = attr_name
def _set_instance_prop(self, attr_name, config_prop, value):
"""Set instance property to a value and add it varz if needed"""
setattr(self, attr_name, value)
# add to varz if it is not private
if not config_prop.exclude_from_varz:
self.varz[attr_name] = value
def _load(self):
"""Load values for all ConfigProperty attributes"""
for attr_name, config_prop in self._iter_config_props():
found = False
for loader in self._loaders:
if loader.exists(config_prop.property_key):
raw_value = loader.get(config_prop.property_key)
converted_value = config_prop.load(raw_value)
self._set_instance_prop(attr_name, config_prop, converted_value)
found = True
break
if not found:
if not config_prop.required or config_prop.default is not None:
self._set_instance_prop(attr_name, config_prop, config_prop.default)
else:
raise ValueError('Missing required ConfigProperty {}'.format(attr_name))
def as_dict(self):
"""Return all properties and values in a dictionary (includes private properties)"""
return {config_name: getattr(self, config_name)
for config_name, _ in self._iter_config_props()}
|
kensho-technologies/grift | grift/config.py | BaseConfig.as_dict | python | def as_dict(self):
return {config_name: getattr(self, config_name)
for config_name, _ in self._iter_config_props()} | Return all properties and values in a dictionary (includes private properties) | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/config.py#L118-L121 | [
"def _iter_config_props(cls):\n \"\"\"Iterate over all ConfigProperty attributes, yielding (attr_name, config_property) \"\"\"\n props = inspect.getmembers(cls, lambda a: isinstance(a, ConfigProperty))\n for attr_name, config_prop in props:\n yield attr_name, config_prop\n"
] | class BaseConfig(object):
"""Base class to hold configuration settings"""
def __init__(self, loaders):
"""Load values into the class's ConfigProperty attributes (validating types if possible)
Args:
loaders: iterable of AbstractLoader instances
ConfigProperty values are loaded from these sources; and the order indicates
preference.
"""
if not loaders:
# Require loaders only if the class has ConfigProperty attributes
if any(self._iter_config_props()):
raise AssertionError('Class has ConfigProperty attributes: must provide loader(s)')
self._update_property_keys()
self.varz = {}
self._loaders = loaders
self._load()
@classmethod
def _iter_config_props(cls):
"""Iterate over all ConfigProperty attributes, yielding (attr_name, config_property) """
props = inspect.getmembers(cls, lambda a: isinstance(a, ConfigProperty))
for attr_name, config_prop in props:
yield attr_name, config_prop
@classmethod
def _update_property_keys(cls):
"""Set unspecified property_keys for each ConfigProperty to the name of the class attr"""
for attr_name, config_prop in cls._iter_config_props():
if config_prop.property_key is None:
config_prop.property_key = attr_name
def _set_instance_prop(self, attr_name, config_prop, value):
"""Set instance property to a value and add it varz if needed"""
setattr(self, attr_name, value)
# add to varz if it is not private
if not config_prop.exclude_from_varz:
self.varz[attr_name] = value
def _load(self):
"""Load values for all ConfigProperty attributes"""
for attr_name, config_prop in self._iter_config_props():
found = False
for loader in self._loaders:
if loader.exists(config_prop.property_key):
raw_value = loader.get(config_prop.property_key)
converted_value = config_prop.load(raw_value)
self._set_instance_prop(attr_name, config_prop, converted_value)
found = True
break
if not found:
if not config_prop.required or config_prop.default is not None:
self._set_instance_prop(attr_name, config_prop, config_prop.default)
else:
raise ValueError('Missing required ConfigProperty {}'.format(attr_name))
def reload(self):
"""Reload all ConfigProperty values (reading from loader sources again, if applicable)"""
for loader in self._loaders:
loader.reload()
self.varz = {} # reset varz
self._load()
|
kensho-technologies/grift | grift/utils.py | in_same_dir | python | def in_same_dir(as_file, target_file):
return os.path.abspath(os.path.join(os.path.dirname(as_file), target_file)) | Return an absolute path to a target file that is located in the same directory as as_file
Args:
as_file: File name (including __file__)
Use the directory path of this file
target_file: Name of the target file | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/utils.py#L5-L13 | null | # Copyright 2017 Kensho Technologies, LLC.
import os
|
kensho-technologies/grift | grift/property_types.py | DictType.to_native | python | def to_native(self, value):
if isinstance(value, dict):
return value
elif isinstance(value, six.string_types):
native_value = json.loads(value)
if isinstance(native_value, dict):
return native_value
else:
raise ConversionError(u'Cannot load value as a dict: {}'.format(value)) | Return the value as a dict, raising error if conversion to dict is not possible | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/property_types.py#L14-L23 | null | class DictType(BaseType):
"""A validation type for dict properties"""
|
kensho-technologies/grift | grift/property_types.py | ListType.to_native | python | def to_native(self, value):
if isinstance(value, six.string_types):
value_list = value.split(self.string_delim)
else:
value_list = value
to_native = self.member_type.to_native if self.member_type is not None else lambda x: x
return [to_native(item) for item in value_list] | Load a value as a list, converting items if necessary | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/property_types.py#L47-L55 | null | class ListType(BaseType):
"""A validation type for list properties"""
def __init__(self, member_type=None, string_delim='|', min_length=None, max_length=None,
*args, **kwargs):
"""Optionally specify constraints for the list
Args:
member_type: a BaseType to load/validate members of the value list. If None, the list
members are kept as is (loading and validation are skipped).
string_delim: a delimiter to load a string as a list. Note: whitespace is not stripped,
and consecutive delimiters are respected (e.g. 'a ||b' is loaded as ['a ', '', 'b'])
min_length: specify a minimum length for the list (validated)
max_length: specify a maximum length for the list (validated)
"""
super(ListType, self).__init__(*args, **kwargs)
self.member_type = member_type
self.string_delim = string_delim
self.min_length = min_length
self.max_length = max_length
def validate_member_type(self, value):
"""Validate each member of the list, if member_type exists"""
if self.member_type:
for item in value:
self.member_type.validate(item)
def validate_length(self, value):
"""Validate the length of value, if min_length or max_length was specified"""
list_len = len(value) if value else 0
if self.max_length is not None and list_len > self.max_length:
raise ValidationError(
u'List has {} values; max length is {}'.format(list_len, self.max_length))
if self.min_length is not None and list_len < self.min_length:
raise ValidationError(
u'List has {} values; min length is {}'.format(list_len, self.min_length))
|
kensho-technologies/grift | grift/property_types.py | ListType.validate_member_type | python | def validate_member_type(self, value):
if self.member_type:
for item in value:
self.member_type.validate(item) | Validate each member of the list, if member_type exists | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/property_types.py#L57-L61 | null | class ListType(BaseType):
"""A validation type for list properties"""
def __init__(self, member_type=None, string_delim='|', min_length=None, max_length=None,
*args, **kwargs):
"""Optionally specify constraints for the list
Args:
member_type: a BaseType to load/validate members of the value list. If None, the list
members are kept as is (loading and validation are skipped).
string_delim: a delimiter to load a string as a list. Note: whitespace is not stripped,
and consecutive delimiters are respected (e.g. 'a ||b' is loaded as ['a ', '', 'b'])
min_length: specify a minimum length for the list (validated)
max_length: specify a maximum length for the list (validated)
"""
super(ListType, self).__init__(*args, **kwargs)
self.member_type = member_type
self.string_delim = string_delim
self.min_length = min_length
self.max_length = max_length
def to_native(self, value):
"""Load a value as a list, converting items if necessary"""
if isinstance(value, six.string_types):
value_list = value.split(self.string_delim)
else:
value_list = value
to_native = self.member_type.to_native if self.member_type is not None else lambda x: x
return [to_native(item) for item in value_list]
def validate_length(self, value):
"""Validate the length of value, if min_length or max_length was specified"""
list_len = len(value) if value else 0
if self.max_length is not None and list_len > self.max_length:
raise ValidationError(
u'List has {} values; max length is {}'.format(list_len, self.max_length))
if self.min_length is not None and list_len < self.min_length:
raise ValidationError(
u'List has {} values; min length is {}'.format(list_len, self.min_length))
|
kensho-technologies/grift | grift/property_types.py | ListType.validate_length | python | def validate_length(self, value):
list_len = len(value) if value else 0
if self.max_length is not None and list_len > self.max_length:
raise ValidationError(
u'List has {} values; max length is {}'.format(list_len, self.max_length))
if self.min_length is not None and list_len < self.min_length:
raise ValidationError(
u'List has {} values; min length is {}'.format(list_len, self.min_length)) | Validate the length of value, if min_length or max_length was specified | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/property_types.py#L63-L73 | null | class ListType(BaseType):
"""A validation type for list properties"""
def __init__(self, member_type=None, string_delim='|', min_length=None, max_length=None,
*args, **kwargs):
"""Optionally specify constraints for the list
Args:
member_type: a BaseType to load/validate members of the value list. If None, the list
members are kept as is (loading and validation are skipped).
string_delim: a delimiter to load a string as a list. Note: whitespace is not stripped,
and consecutive delimiters are respected (e.g. 'a ||b' is loaded as ['a ', '', 'b'])
min_length: specify a minimum length for the list (validated)
max_length: specify a maximum length for the list (validated)
"""
super(ListType, self).__init__(*args, **kwargs)
self.member_type = member_type
self.string_delim = string_delim
self.min_length = min_length
self.max_length = max_length
def to_native(self, value):
"""Load a value as a list, converting items if necessary"""
if isinstance(value, six.string_types):
value_list = value.split(self.string_delim)
else:
value_list = value
to_native = self.member_type.to_native if self.member_type is not None else lambda x: x
return [to_native(item) for item in value_list]
def validate_member_type(self, value):
"""Validate each member of the list, if member_type exists"""
if self.member_type:
for item in value:
self.member_type.validate(item)
|
kensho-technologies/grift | grift/property_types.py | NetworkType.validate_resource | python | def validate_resource(self, value):
def do_backoff(*args, **kwargs):
"""Call self._test_connection with exponential backoff, for self._max_tries attempts"""
attempts = 0
while True:
try:
self._test_connection(*args, **kwargs)
break
except ValidationError:
wait_secs = min(self._max_wait, 2 ** attempts)
attempts += 1
if attempts < self._max_tries:
time.sleep(wait_secs)
else:
raise
do_backoff(value) | Validate the network resource with exponential backoff | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/property_types.py#L101-L119 | [
"def do_backoff(*args, **kwargs):\n \"\"\"Call self._test_connection with exponential backoff, for self._max_tries attempts\"\"\"\n attempts = 0\n while True:\n try:\n self._test_connection(*args, **kwargs)\n break\n except ValidationError:\n wait_secs = min(self._max_wait, 2 ** attempts)\n attempts += 1\n if attempts < self._max_tries:\n time.sleep(wait_secs)\n else:\n raise\n"
] | class NetworkType(StringType):
def __init__(self, max_tries=5, max_wait=10, *args, **kwargs):
"""Validation type for external resources
Attempts to connect to the resource, backing off on failure.
Args:
max_tries: Max number of times to attempt a connection before failing
max_wait: Max number of seconds to wait between connection attempts. This can be
used to cap the exponential backoff.
"""
self._max_tries = max_tries
if self._max_tries < 1:
raise TypeError('max_tries must be a positive integer')
self._max_wait = max_wait
if self._max_wait < 1:
raise TypeError('max_wait must be >= 1')
super(NetworkType, self).__init__(*args, **kwargs)
@staticmethod
def _test_connection(url):
"""Attempt to connect to resource. Raise ValidationError on failure"""
raise NotImplementedError
|
kensho-technologies/grift | grift/loaders.py | VaultLoader.from_token | python | def from_token(cls, url, path, token):
source_dict = cls._fetch_secrets(url, path, token)
return cls(source_dict, url, path, token) | Constructor: use token authentication to read secrets from a Vault path
See https://www.vaultproject.io/docs/auth/token.html
Args:
url: Vault url
path: Vault path where secrets are stored
vault_token: token (must have access to vault path) | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/loaders.py#L100-L111 | [
"def _fetch_secrets(vault_url, path, token):\n \"\"\"Read data from the vault path\"\"\"\n url = _url_joiner(vault_url, 'v1', path)\n resp = requests.get(url, headers=VaultLoader._get_headers(token))\n resp.raise_for_status()\n data = resp.json()\n if data.get('errors'):\n raise VaultException(u'Error fetching Vault secrets from path {}: {}'\n .format(path, data['errors']))\n return data['data']\n"
] | class VaultLoader(DictLoader):
"""Load secrets from a vault path"""
def __init__(self, source_dict, url, path, token):
"""Initializer.
Args:
source_dict: used to initialize the class. Use constructors to read from Vault.
url: Vault url
path: Vault path where secrets are stored
vault_token: token (must have access to vault path)
"""
self._vault_url = url
self._path = path
self._token = token
super(VaultLoader, self).__init__(source_dict)
@classmethod
@classmethod
def from_app_role(cls, url, path, role_id, secret_id):
"""Constructor: use AppRole authentication to read secrets from a Vault path
See https://www.vaultproject.io/docs/auth/approle.html
Args:
url: Vault url
path: Vault path where secrets are stored
role_id: Vault RoleID
secret_id: Vault SecretID
"""
token = cls._fetch_app_role_token(url, role_id, secret_id)
source_dict = cls._fetch_secrets(url, path, token)
return cls(source_dict, url, path, token)
@staticmethod
def _get_headers(token):
"""Return token header to access vault"""
return {'X-Vault-Token': token}
@property
def _headers(self):
"""Return token header to access vault"""
return self._get_headers(self._token)
@staticmethod
def _fetch_secrets(vault_url, path, token):
"""Read data from the vault path"""
url = _url_joiner(vault_url, 'v1', path)
resp = requests.get(url, headers=VaultLoader._get_headers(token))
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error fetching Vault secrets from path {}: {}'
.format(path, data['errors']))
return data['data']
@staticmethod
def _fetch_app_role_token(vault_url, role_id, secret_id):
"""Get a Vault token, using the RoleID and SecretID"""
url = _url_joiner(vault_url, 'v1/auth/approle/login')
resp = requests.post(url, data={'role_id': role_id, 'secret_id': secret_id})
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error fetching Vault token: {}'.format(data['errors']))
return data['auth']['client_token']
def reload(self):
"""Reread secrets from the vault path"""
self._source = self._fetch_secrets(self._vault_url, self._path, self._token)
def lookup_token(self):
"""Convenience method: look up the vault token"""
url = _url_joiner(self._vault_url, 'v1/auth/token/lookup-self')
resp = requests.get(url, headers=self._headers)
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error looking up Vault token: {}'.format(data['errors']))
return data
def renew_token(self):
"""Convenience method: renew Vault token"""
url = _url_joiner(self._vault_url, 'v1/auth/token/renew-self')
resp = requests.get(url, headers=self._headers)
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error renewing Vault token: {}'.format(data['errors']))
return data
|
kensho-technologies/grift | grift/loaders.py | VaultLoader.from_app_role | python | def from_app_role(cls, url, path, role_id, secret_id):
token = cls._fetch_app_role_token(url, role_id, secret_id)
source_dict = cls._fetch_secrets(url, path, token)
return cls(source_dict, url, path, token) | Constructor: use AppRole authentication to read secrets from a Vault path
See https://www.vaultproject.io/docs/auth/approle.html
Args:
url: Vault url
path: Vault path where secrets are stored
role_id: Vault RoleID
secret_id: Vault SecretID | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/loaders.py#L114-L127 | null | class VaultLoader(DictLoader):
"""Load secrets from a vault path"""
def __init__(self, source_dict, url, path, token):
"""Initializer.
Args:
source_dict: used to initialize the class. Use constructors to read from Vault.
url: Vault url
path: Vault path where secrets are stored
vault_token: token (must have access to vault path)
"""
self._vault_url = url
self._path = path
self._token = token
super(VaultLoader, self).__init__(source_dict)
@classmethod
def from_token(cls, url, path, token):
"""Constructor: use token authentication to read secrets from a Vault path
See https://www.vaultproject.io/docs/auth/token.html
Args:
url: Vault url
path: Vault path where secrets are stored
vault_token: token (must have access to vault path)
"""
source_dict = cls._fetch_secrets(url, path, token)
return cls(source_dict, url, path, token)
@classmethod
@staticmethod
def _get_headers(token):
"""Return token header to access vault"""
return {'X-Vault-Token': token}
@property
def _headers(self):
"""Return token header to access vault"""
return self._get_headers(self._token)
@staticmethod
def _fetch_secrets(vault_url, path, token):
"""Read data from the vault path"""
url = _url_joiner(vault_url, 'v1', path)
resp = requests.get(url, headers=VaultLoader._get_headers(token))
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error fetching Vault secrets from path {}: {}'
.format(path, data['errors']))
return data['data']
@staticmethod
def _fetch_app_role_token(vault_url, role_id, secret_id):
"""Get a Vault token, using the RoleID and SecretID"""
url = _url_joiner(vault_url, 'v1/auth/approle/login')
resp = requests.post(url, data={'role_id': role_id, 'secret_id': secret_id})
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error fetching Vault token: {}'.format(data['errors']))
return data['auth']['client_token']
def reload(self):
"""Reread secrets from the vault path"""
self._source = self._fetch_secrets(self._vault_url, self._path, self._token)
def lookup_token(self):
"""Convenience method: look up the vault token"""
url = _url_joiner(self._vault_url, 'v1/auth/token/lookup-self')
resp = requests.get(url, headers=self._headers)
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error looking up Vault token: {}'.format(data['errors']))
return data
def renew_token(self):
"""Convenience method: renew Vault token"""
url = _url_joiner(self._vault_url, 'v1/auth/token/renew-self')
resp = requests.get(url, headers=self._headers)
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error renewing Vault token: {}'.format(data['errors']))
return data
|
kensho-technologies/grift | grift/loaders.py | VaultLoader._fetch_secrets | python | def _fetch_secrets(vault_url, path, token):
url = _url_joiner(vault_url, 'v1', path)
resp = requests.get(url, headers=VaultLoader._get_headers(token))
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error fetching Vault secrets from path {}: {}'
.format(path, data['errors']))
return data['data'] | Read data from the vault path | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/loaders.py#L140-L149 | [
"def _url_joiner(*args):\n \"\"\"Helper: construct an url by joining sections with /\"\"\"\n return '/'.join(s.strip('/') for s in args)\n",
"def _get_headers(token):\n \"\"\"Return token header to access vault\"\"\"\n return {'X-Vault-Token': token}\n"
] | class VaultLoader(DictLoader):
"""Load secrets from a vault path"""
def __init__(self, source_dict, url, path, token):
"""Initializer.
Args:
source_dict: used to initialize the class. Use constructors to read from Vault.
url: Vault url
path: Vault path where secrets are stored
vault_token: token (must have access to vault path)
"""
self._vault_url = url
self._path = path
self._token = token
super(VaultLoader, self).__init__(source_dict)
@classmethod
def from_token(cls, url, path, token):
"""Constructor: use token authentication to read secrets from a Vault path
See https://www.vaultproject.io/docs/auth/token.html
Args:
url: Vault url
path: Vault path where secrets are stored
vault_token: token (must have access to vault path)
"""
source_dict = cls._fetch_secrets(url, path, token)
return cls(source_dict, url, path, token)
@classmethod
def from_app_role(cls, url, path, role_id, secret_id):
"""Constructor: use AppRole authentication to read secrets from a Vault path
See https://www.vaultproject.io/docs/auth/approle.html
Args:
url: Vault url
path: Vault path where secrets are stored
role_id: Vault RoleID
secret_id: Vault SecretID
"""
token = cls._fetch_app_role_token(url, role_id, secret_id)
source_dict = cls._fetch_secrets(url, path, token)
return cls(source_dict, url, path, token)
@staticmethod
def _get_headers(token):
"""Return token header to access vault"""
return {'X-Vault-Token': token}
@property
def _headers(self):
"""Return token header to access vault"""
return self._get_headers(self._token)
@staticmethod
@staticmethod
def _fetch_app_role_token(vault_url, role_id, secret_id):
"""Get a Vault token, using the RoleID and SecretID"""
url = _url_joiner(vault_url, 'v1/auth/approle/login')
resp = requests.post(url, data={'role_id': role_id, 'secret_id': secret_id})
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error fetching Vault token: {}'.format(data['errors']))
return data['auth']['client_token']
def reload(self):
"""Reread secrets from the vault path"""
self._source = self._fetch_secrets(self._vault_url, self._path, self._token)
def lookup_token(self):
"""Convenience method: look up the vault token"""
url = _url_joiner(self._vault_url, 'v1/auth/token/lookup-self')
resp = requests.get(url, headers=self._headers)
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error looking up Vault token: {}'.format(data['errors']))
return data
def renew_token(self):
"""Convenience method: renew Vault token"""
url = _url_joiner(self._vault_url, 'v1/auth/token/renew-self')
resp = requests.get(url, headers=self._headers)
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error renewing Vault token: {}'.format(data['errors']))
return data
|
kensho-technologies/grift | grift/loaders.py | VaultLoader._fetch_app_role_token | python | def _fetch_app_role_token(vault_url, role_id, secret_id):
url = _url_joiner(vault_url, 'v1/auth/approle/login')
resp = requests.post(url, data={'role_id': role_id, 'secret_id': secret_id})
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error fetching Vault token: {}'.format(data['errors']))
return data['auth']['client_token'] | Get a Vault token, using the RoleID and SecretID | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/loaders.py#L152-L160 | null | class VaultLoader(DictLoader):
"""Load secrets from a vault path"""
def __init__(self, source_dict, url, path, token):
"""Initializer.
Args:
source_dict: used to initialize the class. Use constructors to read from Vault.
url: Vault url
path: Vault path where secrets are stored
vault_token: token (must have access to vault path)
"""
self._vault_url = url
self._path = path
self._token = token
super(VaultLoader, self).__init__(source_dict)
@classmethod
def from_token(cls, url, path, token):
"""Constructor: use token authentication to read secrets from a Vault path
See https://www.vaultproject.io/docs/auth/token.html
Args:
url: Vault url
path: Vault path where secrets are stored
vault_token: token (must have access to vault path)
"""
source_dict = cls._fetch_secrets(url, path, token)
return cls(source_dict, url, path, token)
@classmethod
def from_app_role(cls, url, path, role_id, secret_id):
"""Constructor: use AppRole authentication to read secrets from a Vault path
See https://www.vaultproject.io/docs/auth/approle.html
Args:
url: Vault url
path: Vault path where secrets are stored
role_id: Vault RoleID
secret_id: Vault SecretID
"""
token = cls._fetch_app_role_token(url, role_id, secret_id)
source_dict = cls._fetch_secrets(url, path, token)
return cls(source_dict, url, path, token)
@staticmethod
def _get_headers(token):
"""Return token header to access vault"""
return {'X-Vault-Token': token}
@property
def _headers(self):
"""Return token header to access vault"""
return self._get_headers(self._token)
@staticmethod
def _fetch_secrets(vault_url, path, token):
"""Read data from the vault path"""
url = _url_joiner(vault_url, 'v1', path)
resp = requests.get(url, headers=VaultLoader._get_headers(token))
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error fetching Vault secrets from path {}: {}'
.format(path, data['errors']))
return data['data']
@staticmethod
def reload(self):
"""Reread secrets from the vault path"""
self._source = self._fetch_secrets(self._vault_url, self._path, self._token)
def lookup_token(self):
"""Convenience method: look up the vault token"""
url = _url_joiner(self._vault_url, 'v1/auth/token/lookup-self')
resp = requests.get(url, headers=self._headers)
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error looking up Vault token: {}'.format(data['errors']))
return data
def renew_token(self):
"""Convenience method: renew Vault token"""
url = _url_joiner(self._vault_url, 'v1/auth/token/renew-self')
resp = requests.get(url, headers=self._headers)
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error renewing Vault token: {}'.format(data['errors']))
return data
|
kensho-technologies/grift | grift/loaders.py | VaultLoader.reload | python | def reload(self):
self._source = self._fetch_secrets(self._vault_url, self._path, self._token) | Reread secrets from the vault path | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/loaders.py#L162-L164 | [
"def _fetch_secrets(vault_url, path, token):\n \"\"\"Read data from the vault path\"\"\"\n url = _url_joiner(vault_url, 'v1', path)\n resp = requests.get(url, headers=VaultLoader._get_headers(token))\n resp.raise_for_status()\n data = resp.json()\n if data.get('errors'):\n raise VaultException(u'Error fetching Vault secrets from path {}: {}'\n .format(path, data['errors']))\n return data['data']\n"
] | class VaultLoader(DictLoader):
"""Load secrets from a vault path"""
def __init__(self, source_dict, url, path, token):
"""Initializer.
Args:
source_dict: used to initialize the class. Use constructors to read from Vault.
url: Vault url
path: Vault path where secrets are stored
vault_token: token (must have access to vault path)
"""
self._vault_url = url
self._path = path
self._token = token
super(VaultLoader, self).__init__(source_dict)
@classmethod
def from_token(cls, url, path, token):
"""Constructor: use token authentication to read secrets from a Vault path
See https://www.vaultproject.io/docs/auth/token.html
Args:
url: Vault url
path: Vault path where secrets are stored
vault_token: token (must have access to vault path)
"""
source_dict = cls._fetch_secrets(url, path, token)
return cls(source_dict, url, path, token)
@classmethod
def from_app_role(cls, url, path, role_id, secret_id):
"""Constructor: use AppRole authentication to read secrets from a Vault path
See https://www.vaultproject.io/docs/auth/approle.html
Args:
url: Vault url
path: Vault path where secrets are stored
role_id: Vault RoleID
secret_id: Vault SecretID
"""
token = cls._fetch_app_role_token(url, role_id, secret_id)
source_dict = cls._fetch_secrets(url, path, token)
return cls(source_dict, url, path, token)
@staticmethod
def _get_headers(token):
"""Return token header to access vault"""
return {'X-Vault-Token': token}
@property
def _headers(self):
"""Return token header to access vault"""
return self._get_headers(self._token)
@staticmethod
def _fetch_secrets(vault_url, path, token):
"""Read data from the vault path"""
url = _url_joiner(vault_url, 'v1', path)
resp = requests.get(url, headers=VaultLoader._get_headers(token))
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error fetching Vault secrets from path {}: {}'
.format(path, data['errors']))
return data['data']
@staticmethod
def _fetch_app_role_token(vault_url, role_id, secret_id):
"""Get a Vault token, using the RoleID and SecretID"""
url = _url_joiner(vault_url, 'v1/auth/approle/login')
resp = requests.post(url, data={'role_id': role_id, 'secret_id': secret_id})
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error fetching Vault token: {}'.format(data['errors']))
return data['auth']['client_token']
def lookup_token(self):
"""Convenience method: look up the vault token"""
url = _url_joiner(self._vault_url, 'v1/auth/token/lookup-self')
resp = requests.get(url, headers=self._headers)
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error looking up Vault token: {}'.format(data['errors']))
return data
def renew_token(self):
"""Convenience method: renew Vault token"""
url = _url_joiner(self._vault_url, 'v1/auth/token/renew-self')
resp = requests.get(url, headers=self._headers)
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error renewing Vault token: {}'.format(data['errors']))
return data
|
kensho-technologies/grift | grift/loaders.py | VaultLoader.lookup_token | python | def lookup_token(self):
url = _url_joiner(self._vault_url, 'v1/auth/token/lookup-self')
resp = requests.get(url, headers=self._headers)
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error looking up Vault token: {}'.format(data['errors']))
return data | Convenience method: look up the vault token | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/loaders.py#L166-L174 | [
"def _url_joiner(*args):\n \"\"\"Helper: construct an url by joining sections with /\"\"\"\n return '/'.join(s.strip('/') for s in args)\n"
] | class VaultLoader(DictLoader):
"""Load secrets from a vault path"""
def __init__(self, source_dict, url, path, token):
"""Initializer.
Args:
source_dict: used to initialize the class. Use constructors to read from Vault.
url: Vault url
path: Vault path where secrets are stored
vault_token: token (must have access to vault path)
"""
self._vault_url = url
self._path = path
self._token = token
super(VaultLoader, self).__init__(source_dict)
@classmethod
def from_token(cls, url, path, token):
"""Constructor: use token authentication to read secrets from a Vault path
See https://www.vaultproject.io/docs/auth/token.html
Args:
url: Vault url
path: Vault path where secrets are stored
vault_token: token (must have access to vault path)
"""
source_dict = cls._fetch_secrets(url, path, token)
return cls(source_dict, url, path, token)
@classmethod
def from_app_role(cls, url, path, role_id, secret_id):
"""Constructor: use AppRole authentication to read secrets from a Vault path
See https://www.vaultproject.io/docs/auth/approle.html
Args:
url: Vault url
path: Vault path where secrets are stored
role_id: Vault RoleID
secret_id: Vault SecretID
"""
token = cls._fetch_app_role_token(url, role_id, secret_id)
source_dict = cls._fetch_secrets(url, path, token)
return cls(source_dict, url, path, token)
@staticmethod
def _get_headers(token):
"""Return token header to access vault"""
return {'X-Vault-Token': token}
@property
def _headers(self):
"""Return token header to access vault"""
return self._get_headers(self._token)
@staticmethod
def _fetch_secrets(vault_url, path, token):
"""Read data from the vault path"""
url = _url_joiner(vault_url, 'v1', path)
resp = requests.get(url, headers=VaultLoader._get_headers(token))
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error fetching Vault secrets from path {}: {}'
.format(path, data['errors']))
return data['data']
@staticmethod
def _fetch_app_role_token(vault_url, role_id, secret_id):
"""Get a Vault token, using the RoleID and SecretID"""
url = _url_joiner(vault_url, 'v1/auth/approle/login')
resp = requests.post(url, data={'role_id': role_id, 'secret_id': secret_id})
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error fetching Vault token: {}'.format(data['errors']))
return data['auth']['client_token']
def reload(self):
"""Reread secrets from the vault path"""
self._source = self._fetch_secrets(self._vault_url, self._path, self._token)
def renew_token(self):
"""Convenience method: renew Vault token"""
url = _url_joiner(self._vault_url, 'v1/auth/token/renew-self')
resp = requests.get(url, headers=self._headers)
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error renewing Vault token: {}'.format(data['errors']))
return data
|
kensho-technologies/grift | grift/loaders.py | VaultLoader.renew_token | python | def renew_token(self):
url = _url_joiner(self._vault_url, 'v1/auth/token/renew-self')
resp = requests.get(url, headers=self._headers)
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error renewing Vault token: {}'.format(data['errors']))
return data | Convenience method: renew Vault token | train | https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/loaders.py#L176-L184 | [
"def _url_joiner(*args):\n \"\"\"Helper: construct an url by joining sections with /\"\"\"\n return '/'.join(s.strip('/') for s in args)\n"
] | class VaultLoader(DictLoader):
"""Load secrets from a vault path"""
def __init__(self, source_dict, url, path, token):
"""Initializer.
Args:
source_dict: used to initialize the class. Use constructors to read from Vault.
url: Vault url
path: Vault path where secrets are stored
vault_token: token (must have access to vault path)
"""
self._vault_url = url
self._path = path
self._token = token
super(VaultLoader, self).__init__(source_dict)
@classmethod
def from_token(cls, url, path, token):
"""Constructor: use token authentication to read secrets from a Vault path
See https://www.vaultproject.io/docs/auth/token.html
Args:
url: Vault url
path: Vault path where secrets are stored
vault_token: token (must have access to vault path)
"""
source_dict = cls._fetch_secrets(url, path, token)
return cls(source_dict, url, path, token)
@classmethod
def from_app_role(cls, url, path, role_id, secret_id):
"""Constructor: use AppRole authentication to read secrets from a Vault path
See https://www.vaultproject.io/docs/auth/approle.html
Args:
url: Vault url
path: Vault path where secrets are stored
role_id: Vault RoleID
secret_id: Vault SecretID
"""
token = cls._fetch_app_role_token(url, role_id, secret_id)
source_dict = cls._fetch_secrets(url, path, token)
return cls(source_dict, url, path, token)
@staticmethod
def _get_headers(token):
"""Return token header to access vault"""
return {'X-Vault-Token': token}
@property
def _headers(self):
"""Return token header to access vault"""
return self._get_headers(self._token)
@staticmethod
def _fetch_secrets(vault_url, path, token):
"""Read data from the vault path"""
url = _url_joiner(vault_url, 'v1', path)
resp = requests.get(url, headers=VaultLoader._get_headers(token))
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error fetching Vault secrets from path {}: {}'
.format(path, data['errors']))
return data['data']
@staticmethod
def _fetch_app_role_token(vault_url, role_id, secret_id):
"""Get a Vault token, using the RoleID and SecretID"""
url = _url_joiner(vault_url, 'v1/auth/approle/login')
resp = requests.post(url, data={'role_id': role_id, 'secret_id': secret_id})
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error fetching Vault token: {}'.format(data['errors']))
return data['auth']['client_token']
def reload(self):
"""Reread secrets from the vault path"""
self._source = self._fetch_secrets(self._vault_url, self._path, self._token)
def lookup_token(self):
"""Convenience method: look up the vault token"""
url = _url_joiner(self._vault_url, 'v1/auth/token/lookup-self')
resp = requests.get(url, headers=self._headers)
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error looking up Vault token: {}'.format(data['errors']))
return data
|
crm416/semantic | semantic/units.py | ConversionService.parseUnits | python | def parseUnits(self, inp):
quantity = self.convert(inp)
units = ' '.join(str(quantity.units).split(' ')[1:])
return NumberService.parseMagnitude(quantity.item()) + " " + units | Carries out a conversion (represented as a string) and returns the
result as a human-readable string.
Args:
inp (str): Text representing a unit conversion, which should
include a magnitude, a description of the initial units,
and a description of the target units to which the quantity
should be converted.
Returns:
A quantities object representing the converted quantity and its new
units. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/units.py#L55-L71 | [
"def parseMagnitude(m):\n \"\"\"Parses a number m into a human-ready string representation.\n For example, crops off floats if they're too accurate.\n\n Arguments:\n m (float): Floating-point number to be cleaned.\n\n Returns:\n Human-ready string description of the number.\n \"\"\"\n m = NumberService().parse(m)\n\n def toDecimalPrecision(n, k):\n return float(\"%.*f\" % (k, round(n, k)))\n\n # Cast to two digits of precision\n digits = 2\n magnitude = toDecimalPrecision(m, digits)\n\n # If value is really small, keep going\n while not magnitude:\n digits += 1\n magnitude = toDecimalPrecision(m, digits)\n\n # If item is less than one, go one beyond 'necessary' number of digits\n if m < 1.0:\n magnitude = toDecimalPrecision(m, digits + 1)\n\n # Ignore decimal accuracy if irrelevant\n if int(magnitude) == magnitude:\n magnitude = int(magnitude)\n\n # Adjust for scientific notation\n magString = str(magnitude)\n magString = re.sub(r'(\\d)e-(\\d+)',\n '\\g<1> times ten to the negative \\g<2>', magString)\n magString = re.sub(r'(\\d)e\\+(\\d+)',\n '\\g<1> times ten to the \\g<2>', magString)\n magString = re.sub(r'-(\\d+)', 'negative \\g<1>', magString)\n magString = re.sub(r'\\b0(\\d+)', '\\g<1>', magString)\n return magString\n",
"def convert(self, inp):\n \"\"\"Converts a string representation of some quantity of units into a\n quantities object.\n\n Args:\n inp (str): A textual representation of some quantity of units,\n e.g., \"fifty kilograms\".\n\n Returns:\n A quantities object representing the described quantity and its\n units.\n \"\"\"\n inp = self._preprocess(inp)\n\n n = NumberService().longestNumber(inp)\n units = self.extractUnits(inp)\n\n # Convert to quantity object, attempt conversion\n quantity = pq.Quantity(float(n), units[0])\n quantity.units = units[1]\n\n return quantity\n"
] | class ConversionService(object):
__exponents__ = {
'square': 2,
'squared': 2,
'cubed': 3
}
def _preprocess(self, inp):
def handleExponents(inp):
m = re.search(r'\bsquare (\w+)', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\bsquare (\w+)', r'\g<1>^2', inp)
m = re.search(r'\bsquared (\w+)', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\bsquared (\w+)', r'\g<1>^2', inp)
m = re.search(r'\b(\w+) squared', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\b(\w+) squared', r'\g<1>^2', inp)
m = re.search(r'\bsq (\w+)', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\bsq (\w+)', r'\g<1>^2', inp)
m = re.search(r'\b(\w+) cubed', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\b(\w+) cubed', r'\g<1>^3', inp)
m = re.search(r'\bcubic (\w+)', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\bcubic (\w+)', r'\g<1>^3', inp)
service = NumberService()
m = re.search(r'\b(\w+) to the (\w+)( power)?', inp)
if m and self.isValidUnit(m.group(1)):
if m.group(2) in service.__ordinals__:
exp = service.parseMagnitude(m.group(2))
inp = re.sub(r'\b(\w+) to the (\w+)( power)?',
r'\g<1>^' + str(exp), inp)
return inp
inp = re.sub(r'\sper\s', r' / ', inp)
inp = handleExponents(inp)
return inp
def isValidUnit(self, w):
"""Checks if a string represents a valid quantities unit.
Args:
w (str): A string to be tested against the set of valid
quantities units.
Returns:
True if the string can be used as a unit in the quantities
module.
"""
bad = set(['point', 'a'])
if w in bad:
return False
try:
pq.Quantity(0.0, w)
return True
except:
return w == '/'
def extractUnits(self, inp):
"""Collects all the valid units from an inp string. Works by
appending consecutive words from the string and cross-referncing
them with a set of valid units.
Args:
inp (str): Some text which hopefully contains descriptions
of different units.
Returns:
A list of strings, each entry in which is a valid quantities
unit.
"""
inp = self._preprocess(inp)
units = []
description = ""
for w in inp.split(' '):
if self.isValidUnit(w) or w == '/':
if description:
description += " "
description += w
else:
if description:
units.append(description)
description = ""
if description:
units.append(description)
return units
def convert(self, inp):
"""Converts a string representation of some quantity of units into a
quantities object.
Args:
inp (str): A textual representation of some quantity of units,
e.g., "fifty kilograms".
Returns:
A quantities object representing the described quantity and its
units.
"""
inp = self._preprocess(inp)
n = NumberService().longestNumber(inp)
units = self.extractUnits(inp)
# Convert to quantity object, attempt conversion
quantity = pq.Quantity(float(n), units[0])
quantity.units = units[1]
return quantity
|
crm416/semantic | semantic/units.py | ConversionService.isValidUnit | python | def isValidUnit(self, w):
bad = set(['point', 'a'])
if w in bad:
return False
try:
pq.Quantity(0.0, w)
return True
except:
return w == '/' | Checks if a string represents a valid quantities unit.
Args:
w (str): A string to be tested against the set of valid
quantities units.
Returns:
True if the string can be used as a unit in the quantities
module. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/units.py#L73-L92 | null | class ConversionService(object):
__exponents__ = {
'square': 2,
'squared': 2,
'cubed': 3
}
def _preprocess(self, inp):
def handleExponents(inp):
m = re.search(r'\bsquare (\w+)', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\bsquare (\w+)', r'\g<1>^2', inp)
m = re.search(r'\bsquared (\w+)', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\bsquared (\w+)', r'\g<1>^2', inp)
m = re.search(r'\b(\w+) squared', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\b(\w+) squared', r'\g<1>^2', inp)
m = re.search(r'\bsq (\w+)', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\bsq (\w+)', r'\g<1>^2', inp)
m = re.search(r'\b(\w+) cubed', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\b(\w+) cubed', r'\g<1>^3', inp)
m = re.search(r'\bcubic (\w+)', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\bcubic (\w+)', r'\g<1>^3', inp)
service = NumberService()
m = re.search(r'\b(\w+) to the (\w+)( power)?', inp)
if m and self.isValidUnit(m.group(1)):
if m.group(2) in service.__ordinals__:
exp = service.parseMagnitude(m.group(2))
inp = re.sub(r'\b(\w+) to the (\w+)( power)?',
r'\g<1>^' + str(exp), inp)
return inp
inp = re.sub(r'\sper\s', r' / ', inp)
inp = handleExponents(inp)
return inp
def parseUnits(self, inp):
"""Carries out a conversion (represented as a string) and returns the
result as a human-readable string.
Args:
inp (str): Text representing a unit conversion, which should
include a magnitude, a description of the initial units,
and a description of the target units to which the quantity
should be converted.
Returns:
A quantities object representing the converted quantity and its new
units.
"""
quantity = self.convert(inp)
units = ' '.join(str(quantity.units).split(' ')[1:])
return NumberService.parseMagnitude(quantity.item()) + " " + units
def extractUnits(self, inp):
"""Collects all the valid units from an inp string. Works by
appending consecutive words from the string and cross-referncing
them with a set of valid units.
Args:
inp (str): Some text which hopefully contains descriptions
of different units.
Returns:
A list of strings, each entry in which is a valid quantities
unit.
"""
inp = self._preprocess(inp)
units = []
description = ""
for w in inp.split(' '):
if self.isValidUnit(w) or w == '/':
if description:
description += " "
description += w
else:
if description:
units.append(description)
description = ""
if description:
units.append(description)
return units
def convert(self, inp):
"""Converts a string representation of some quantity of units into a
quantities object.
Args:
inp (str): A textual representation of some quantity of units,
e.g., "fifty kilograms".
Returns:
A quantities object representing the described quantity and its
units.
"""
inp = self._preprocess(inp)
n = NumberService().longestNumber(inp)
units = self.extractUnits(inp)
# Convert to quantity object, attempt conversion
quantity = pq.Quantity(float(n), units[0])
quantity.units = units[1]
return quantity
|
crm416/semantic | semantic/units.py | ConversionService.extractUnits | python | def extractUnits(self, inp):
inp = self._preprocess(inp)
units = []
description = ""
for w in inp.split(' '):
if self.isValidUnit(w) or w == '/':
if description:
description += " "
description += w
else:
if description:
units.append(description)
description = ""
if description:
units.append(description)
return units | Collects all the valid units from an inp string. Works by
appending consecutive words from the string and cross-referncing
them with a set of valid units.
Args:
inp (str): Some text which hopefully contains descriptions
of different units.
Returns:
A list of strings, each entry in which is a valid quantities
unit. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/units.py#L94-L123 | [
"def _preprocess(self, inp):\n def handleExponents(inp):\n m = re.search(r'\\bsquare (\\w+)', inp)\n if m and self.isValidUnit(m.group(1)):\n inp = re.sub(r'\\bsquare (\\w+)', r'\\g<1>^2', inp)\n\n m = re.search(r'\\bsquared (\\w+)', inp)\n if m and self.isValidUnit(m.group(1)):\n inp = re.sub(r'\\bsquared (\\w+)', r'\\g<1>^2', inp)\n\n m = re.search(r'\\b(\\w+) squared', inp)\n if m and self.isValidUnit(m.group(1)):\n inp = re.sub(r'\\b(\\w+) squared', r'\\g<1>^2', inp)\n\n m = re.search(r'\\bsq (\\w+)', inp)\n if m and self.isValidUnit(m.group(1)):\n inp = re.sub(r'\\bsq (\\w+)', r'\\g<1>^2', inp)\n\n m = re.search(r'\\b(\\w+) cubed', inp)\n if m and self.isValidUnit(m.group(1)):\n inp = re.sub(r'\\b(\\w+) cubed', r'\\g<1>^3', inp)\n\n m = re.search(r'\\bcubic (\\w+)', inp)\n if m and self.isValidUnit(m.group(1)):\n inp = re.sub(r'\\bcubic (\\w+)', r'\\g<1>^3', inp)\n\n service = NumberService()\n m = re.search(r'\\b(\\w+) to the (\\w+)( power)?', inp)\n if m and self.isValidUnit(m.group(1)):\n if m.group(2) in service.__ordinals__:\n exp = service.parseMagnitude(m.group(2))\n inp = re.sub(r'\\b(\\w+) to the (\\w+)( power)?',\n r'\\g<1>^' + str(exp), inp)\n\n return inp\n\n inp = re.sub(r'\\sper\\s', r' / ', inp)\n inp = handleExponents(inp)\n\n return inp\n",
"def isValidUnit(self, w):\n \"\"\"Checks if a string represents a valid quantities unit.\n\n Args:\n w (str): A string to be tested against the set of valid\n quantities units.\n\n Returns:\n True if the string can be used as a unit in the quantities\n module.\n \"\"\"\n bad = set(['point', 'a'])\n if w in bad:\n return False\n\n try:\n pq.Quantity(0.0, w)\n return True\n except:\n return w == '/'\n"
] | class ConversionService(object):
__exponents__ = {
'square': 2,
'squared': 2,
'cubed': 3
}
def _preprocess(self, inp):
def handleExponents(inp):
m = re.search(r'\bsquare (\w+)', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\bsquare (\w+)', r'\g<1>^2', inp)
m = re.search(r'\bsquared (\w+)', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\bsquared (\w+)', r'\g<1>^2', inp)
m = re.search(r'\b(\w+) squared', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\b(\w+) squared', r'\g<1>^2', inp)
m = re.search(r'\bsq (\w+)', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\bsq (\w+)', r'\g<1>^2', inp)
m = re.search(r'\b(\w+) cubed', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\b(\w+) cubed', r'\g<1>^3', inp)
m = re.search(r'\bcubic (\w+)', inp)
if m and self.isValidUnit(m.group(1)):
inp = re.sub(r'\bcubic (\w+)', r'\g<1>^3', inp)
service = NumberService()
m = re.search(r'\b(\w+) to the (\w+)( power)?', inp)
if m and self.isValidUnit(m.group(1)):
if m.group(2) in service.__ordinals__:
exp = service.parseMagnitude(m.group(2))
inp = re.sub(r'\b(\w+) to the (\w+)( power)?',
r'\g<1>^' + str(exp), inp)
return inp
inp = re.sub(r'\sper\s', r' / ', inp)
inp = handleExponents(inp)
return inp
def parseUnits(self, inp):
"""Carries out a conversion (represented as a string) and returns the
result as a human-readable string.
Args:
inp (str): Text representing a unit conversion, which should
include a magnitude, a description of the initial units,
and a description of the target units to which the quantity
should be converted.
Returns:
A quantities object representing the converted quantity and its new
units.
"""
quantity = self.convert(inp)
units = ' '.join(str(quantity.units).split(' ')[1:])
return NumberService.parseMagnitude(quantity.item()) + " " + units
def isValidUnit(self, w):
"""Checks if a string represents a valid quantities unit.
Args:
w (str): A string to be tested against the set of valid
quantities units.
Returns:
True if the string can be used as a unit in the quantities
module.
"""
bad = set(['point', 'a'])
if w in bad:
return False
try:
pq.Quantity(0.0, w)
return True
except:
return w == '/'
def convert(self, inp):
"""Converts a string representation of some quantity of units into a
quantities object.
Args:
inp (str): A textual representation of some quantity of units,
e.g., "fifty kilograms".
Returns:
A quantities object representing the described quantity and its
units.
"""
inp = self._preprocess(inp)
n = NumberService().longestNumber(inp)
units = self.extractUnits(inp)
# Convert to quantity object, attempt conversion
quantity = pq.Quantity(float(n), units[0])
quantity.units = units[1]
return quantity
|
crm416/semantic | semantic/units.py | ConversionService.convert | python | def convert(self, inp):
inp = self._preprocess(inp)
n = NumberService().longestNumber(inp)
units = self.extractUnits(inp)
# Convert to quantity object, attempt conversion
quantity = pq.Quantity(float(n), units[0])
quantity.units = units[1]
return quantity | Converts a string representation of some quantity of units into a
quantities object.
Args:
inp (str): A textual representation of some quantity of units,
e.g., "fifty kilograms".
Returns:
A quantities object representing the described quantity and its
units. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/units.py#L125-L146 | [
"def longestNumber(self, inp):\n \"\"\"Extracts the longest valid numerical description from a string.\n Not guaranteed to return a result even if some valid numerical\n description exists (i.e., method is not particularly advanced).\n\n Args:\n inp (str): An arbitrary string, hopefully containing a number.\n\n Returns:\n The number with the longest string description in input,\n or None if not found.\n \"\"\"\n split = inp.split(' ')\n\n # Assume just a single number\n numStart = None\n numEnd = None\n for i, w in enumerate(split):\n if self.isValid(w):\n if numStart is None:\n numStart = i\n numEnd = i\n else:\n # Check for ordinal, which would signify end\n w = re.sub(r'(\\w+)s(\\b)', '\\g<1>\\g<2>', w)\n if w in self.__ordinals__:\n if self.isValid(' '.join(split[numStart:i + 1])):\n numEnd = i\n break\n description = ' '.join(split[numStart:numEnd + 1])\n return self.parse(description)\n",
"def _preprocess(self, inp):\n def handleExponents(inp):\n m = re.search(r'\\bsquare (\\w+)', inp)\n if m and self.isValidUnit(m.group(1)):\n inp = re.sub(r'\\bsquare (\\w+)', r'\\g<1>^2', inp)\n\n m = re.search(r'\\bsquared (\\w+)', inp)\n if m and self.isValidUnit(m.group(1)):\n inp = re.sub(r'\\bsquared (\\w+)', r'\\g<1>^2', inp)\n\n m = re.search(r'\\b(\\w+) squared', inp)\n if m and self.isValidUnit(m.group(1)):\n inp = re.sub(r'\\b(\\w+) squared', r'\\g<1>^2', inp)\n\n m = re.search(r'\\bsq (\\w+)', inp)\n if m and self.isValidUnit(m.group(1)):\n inp = re.sub(r'\\bsq (\\w+)', r'\\g<1>^2', inp)\n\n m = re.search(r'\\b(\\w+) cubed', inp)\n if m and self.isValidUnit(m.group(1)):\n inp = re.sub(r'\\b(\\w+) cubed', r'\\g<1>^3', inp)\n\n m = re.search(r'\\bcubic (\\w+)', inp)\n if m and self.isValidUnit(m.group(1)):\n inp = re.sub(r'\\bcubic (\\w+)', r'\\g<1>^3', inp)\n\n service = NumberService()\n m = re.search(r'\\b(\\w+) to the (\\w+)( power)?', inp)\n if m and self.isValidUnit(m.group(1)):\n if m.group(2) in service.__ordinals__:\n exp = service.parseMagnitude(m.group(2))\n inp = re.sub(r'\\b(\\w+) to the (\\w+)( power)?',\n r'\\g<1>^' + str(exp), inp)\n\n return inp\n\n inp = re.sub(r'\\sper\\s', r' / ', inp)\n inp = handleExponents(inp)\n\n return inp\n",
"def extractUnits(self, inp):\n \"\"\"Collects all the valid units from an inp string. Works by\n appending consecutive words from the string and cross-referncing\n them with a set of valid units.\n\n Args:\n inp (str): Some text which hopefully contains descriptions\n of different units.\n\n Returns:\n A list of strings, each entry in which is a valid quantities\n unit.\n \"\"\"\n inp = self._preprocess(inp)\n\n units = []\n description = \"\"\n for w in inp.split(' '):\n if self.isValidUnit(w) or w == '/':\n if description:\n description += \" \"\n description += w\n else:\n if description:\n units.append(description)\n description = \"\"\n\n if description:\n units.append(description)\n return units\n"
class ConversionService(object):
    """Extracts and validates quantity units expressed in natural language.

    Unit validation is delegated to the third-party ``quantities`` package
    (available here as ``pq`` -- imported elsewhere in this file), and
    number-word parsing to ``NumberService``.
    """

    # Maps exponent keywords to the powers they denote.
    # NOTE(review): not referenced by the methods below (_preprocess
    # hard-codes its own exponent regexes); kept for backward compatibility.
    __exponents__ = {
        'square': 2,
        'squared': 2,
        'cubed': 3
    }

    def _preprocess(self, inp):
        """Rewrite spelled-out exponents into caret notation and 'per'
        into '/'.

        Args:
            inp (str): Raw input text.

        Returns:
            The rewritten string, e.g. "square meters" -> "meters^2".
        """
        def handleExponents(inp):
            # Each rewrite fires only if the candidate word is a real unit,
            # so e.g. "square dance" is left untouched.
            m = re.search(r'\bsquare (\w+)', inp)
            if m and self.isValidUnit(m.group(1)):
                inp = re.sub(r'\bsquare (\w+)', r'\g<1>^2', inp)

            m = re.search(r'\bsquared (\w+)', inp)
            if m and self.isValidUnit(m.group(1)):
                inp = re.sub(r'\bsquared (\w+)', r'\g<1>^2', inp)

            m = re.search(r'\b(\w+) squared', inp)
            if m and self.isValidUnit(m.group(1)):
                inp = re.sub(r'\b(\w+) squared', r'\g<1>^2', inp)

            m = re.search(r'\bsq (\w+)', inp)
            if m and self.isValidUnit(m.group(1)):
                inp = re.sub(r'\bsq (\w+)', r'\g<1>^2', inp)

            m = re.search(r'\b(\w+) cubed', inp)
            if m and self.isValidUnit(m.group(1)):
                inp = re.sub(r'\b(\w+) cubed', r'\g<1>^3', inp)

            m = re.search(r'\bcubic (\w+)', inp)
            if m and self.isValidUnit(m.group(1)):
                inp = re.sub(r'\bcubic (\w+)', r'\g<1>^3', inp)

            # "x to the nth (power)" -> "x^n", using NumberService to map
            # ordinal words ("second", "third", ...) to integers.
            service = NumberService()
            m = re.search(r'\b(\w+) to the (\w+)( power)?', inp)
            if m and self.isValidUnit(m.group(1)):
                if m.group(2) in service.__ordinals__:
                    exp = service.parseMagnitude(m.group(2))
                    inp = re.sub(r'\b(\w+) to the (\w+)( power)?',
                                 r'\g<1>^' + str(exp), inp)

            return inp

        inp = re.sub(r'\sper\s', r' / ', inp)
        inp = handleExponents(inp)

        return inp

    def parseUnits(self, inp):
        """Carries out a conversion (represented as a string) and returns the
        result as a human-readable string.

        Args:
            inp (str): Text representing a unit conversion, which should
                include a magnitude, a description of the initial units,
                and a description of the target units to which the quantity
                should be converted.

        Returns:
            A quantities object representing the converted quantity and its new
            units.
        """
        # NOTE(review): self.convert is defined elsewhere in this file --
        # confirm its return carries .units and .item().
        quantity = self.convert(inp)
        units = ' '.join(str(quantity.units).split(' ')[1:])
        return NumberService.parseMagnitude(quantity.item()) + " " + units

    def isValidUnit(self, w):
        """Checks if a string represents a valid quantities unit.

        Args:
            w (str): A string to be tested against the set of valid
                quantities units.

        Returns:
            True if the string can be used as a unit in the quantities
            module.
        """
        bad = set(['point', 'a'])
        if w in bad:
            return False

        try:
            pq.Quantity(0.0, w)
            return True
        # Fixed: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit. pq raises for unknown unit names;
        # '/' is accepted as the division symbol even though it is not a unit.
        except Exception:
            return w == '/'

    def extractUnits(self, inp):
        """Collects all the valid units from an inp string. Works by
        appending consecutive words from the string and cross-referncing
        them with a set of valid units.

        Args:
            inp (str): Some text which hopefully contains descriptions
                of different units.

        Returns:
            A list of strings, each entry in which is a valid quantities
            unit.
        """
        inp = self._preprocess(inp)

        units = []
        description = ""
        for w in inp.split(' '):
            if self.isValidUnit(w) or w == '/':
                if description:
                    description += " "
                description += w
            else:
                if description:
                    units.append(description)
                description = ""

        if description:
            units.append(description)
        return units
return units
|
def _preprocess(inp):
    """Revise wording to match canonical and expected forms.

    Normalizes spoken arithmetic ("a", "to the ... power", "squared",
    "divided by", constant spellings) and then inserts any omitted
    'times' tokens before constants and unary operators.
    """
    # Ordered rewrite rules; each pair is applied in sequence, so later
    # rules see the output of earlier ones.
    rewrites = (
        (r'(\b)a(\b)', r'\g<1>one\g<2>'),
        (r'to the (.*) power', r'to \g<1>'),
        (r'to the (.*?)(\b)', r'to \g<1>\g<2>'),
        (r'log of', r'log'),
        (r'(square )?root( of)?', r'sqrt'),
        (r'squared', r'to two'),
        (r'cubed', r'to three'),
        (r'divided?( by)?', r'divide'),
        (r'(\b)over(\b)', r'\g<1>divide\g<2>'),
        (r'(\b)EE(\b)', r'\g<1>e\g<2>'),
        (r'(\b)E(\b)', r'\g<1>e\g<2>'),
        (r'(\b)pie(\b)', r'\g<1>pi\g<2>'),
        (r'(\b)PI(\b)', r'\g<1>pi\g<2>'),
    )
    for pattern, replacement in rewrites:
        inp = re.sub(pattern, replacement, inp)

    def findImplicitMultiplications(inp):
        """Replace omitted 'times' references."""

        def findConstantMultiplications(inp):
            # Insert 'times' between a number word and a following constant
            # (e.g. "two pi" -> "two times pi").
            converter = NumberService()
            words = inp.split(' ')
            rebuilt = ""
            for idx, token in enumerate(words):
                if (idx > 0 and token in MathService.__constants__
                        and converter.isValid(words[idx - 1])):
                    rebuilt += " times"
                rebuilt = token if not rebuilt else rebuilt + " " + token
            return rebuilt

        def findUnaryMultiplications(inp):
            # Insert 'times' between an operand and a following unary
            # operator (e.g. "two sqrt three" -> "two times sqrt three").
            words = inp.split(' ')
            rebuilt = ""
            for idx, token in enumerate(words):
                if idx > 0 and token in MathService.__unaryOperators__:
                    prev = words[idx - 1]
                    if (prev
                            and prev not in MathService.__binaryOperators__
                            and prev not in MathService.__unaryOperators__):
                        rebuilt += " times"
                rebuilt = token if not rebuilt else rebuilt + " " + token
            return rebuilt

        return findUnaryMultiplications(findConstantMultiplications(inp))

    return findImplicitMultiplications(inp)
"def findImplicitMultiplications(inp):\n \"\"\"Replace omitted 'times' references.\"\"\"\n\n def findConstantMultiplications(inp):\n split = inp.split(' ')\n revision = \"\"\n\n converter = NumberService()\n for i, w in enumerate(split):\n if i > 0 and w in MathService.__constants__:\n if converter.isValid(split[i - 1]):\n revision += \" times\"\n if not revision:\n revision = w\n else:\n revision += \" \" + w\n\n return revision\n\n def findUnaryMultiplications(inp):\n split = inp.split(' ')\n revision = \"\"\n\n for i, w in enumerate(split):\n if i > 0 and w in MathService.__unaryOperators__:\n last_op = split[i - 1]\n\n binary = last_op in MathService.__binaryOperators__\n unary = last_op in MathService.__unaryOperators__\n\n if last_op and not (binary or unary):\n revision += \" times\"\n if not revision:\n revision = w\n else:\n revision += \" \" + w\n\n return revision\n\n return findUnaryMultiplications(findConstantMultiplications(inp))\n"
class MathService(object):
    """Parses and evaluates arithmetic expressions spelled out in words."""

    # Named constants recognized in input (case/spelling variants included).
    __constants__ = {
        'e': e,
        'E': e,
        'EE': e,
        'pi': pi,
        'pie': pi
    }

    # Unary operations, keyed by every spoken form they may take.
    __unaryOperators__ = {
        'log': log,
        'sine': sin,
        'sin': sin,
        'cosine': cos,
        'cos': cos,
        'tan': tan,
        'tangent': tan,
        'arcsine': asin,
        'arcsin': asin,
        'asin': asin,
        'arccosine': acos,
        'arccos': acos,
        'acos': acos,
        'arctanget': atan,
        'arctan': atan,
        'atan': atan,
        'sqrt': sqrt
    }

    # Binary operations keyed by spoken form; 'to' denotes exponentiation.
    # NOTE(review): 'div' comes from this file's imports (Py2 operator.div?)
    # -- confirm it resolves under Python 3.
    __binaryOperators__ = {
        'plus': add,
        'add': add,
        'sum': add,
        'minus': sub,
        'sub': sub,
        'subtract': sub,
        'less': sub,
        'over': div,
        'divide': div,
        'times': mul,
        'multiply': mul,
        'to': pow
    }

    @staticmethod
    def _applyBinary(a, b, op):
        """Apply binary operator ``op`` to ``a`` and ``b``, coerced to float."""
        a = float(a)
        b = float(b)
        return op(a, b)

    @staticmethod
    def _applyUnary(a, op):
        """Apply unary operator ``op`` to ``a``, coerced to float."""
        a = float(a)
        return op(a)

    # Fixed: the class previously carried a stray duplicated @staticmethod
    # (which made the next method uncallable before Python 3.10) where this
    # method's body had gone missing, even though parseEquation() calls
    # MathService._preprocess. Restored as a proper static method.
    @staticmethod
    def _preprocess(inp):
        """Revise wording to match canonical and expected forms."""
        inp = re.sub(r'(\b)a(\b)', r'\g<1>one\g<2>', inp)
        inp = re.sub(r'to the (.*) power', r'to \g<1>', inp)
        inp = re.sub(r'to the (.*?)(\b)', r'to \g<1>\g<2>', inp)
        inp = re.sub(r'log of', r'log', inp)
        inp = re.sub(r'(square )?root( of)?', r'sqrt', inp)
        inp = re.sub(r'squared', r'to two', inp)
        inp = re.sub(r'cubed', r'to three', inp)
        inp = re.sub(r'divided?( by)?', r'divide', inp)
        inp = re.sub(r'(\b)over(\b)', r'\g<1>divide\g<2>', inp)
        inp = re.sub(r'(\b)EE(\b)', r'\g<1>e\g<2>', inp)
        inp = re.sub(r'(\b)E(\b)', r'\g<1>e\g<2>', inp)
        inp = re.sub(r'(\b)pie(\b)', r'\g<1>pi\g<2>', inp)
        inp = re.sub(r'(\b)PI(\b)', r'\g<1>pi\g<2>', inp)

        def findImplicitMultiplications(inp):
            """Replace omitted 'times' references."""

            def findConstantMultiplications(inp):
                # "two pi" -> "two times pi"
                split = inp.split(' ')
                revision = ""

                converter = NumberService()
                for i, w in enumerate(split):
                    if i > 0 and w in MathService.__constants__:
                        if converter.isValid(split[i - 1]):
                            revision += " times"
                    if not revision:
                        revision = w
                    else:
                        revision += " " + w

                return revision

            def findUnaryMultiplications(inp):
                # "two sqrt three" -> "two times sqrt three"
                split = inp.split(' ')
                revision = ""

                for i, w in enumerate(split):
                    if i > 0 and w in MathService.__unaryOperators__:
                        last_op = split[i - 1]

                        binary = last_op in MathService.__binaryOperators__
                        unary = last_op in MathService.__unaryOperators__

                        if last_op and not (binary or unary):
                            revision += " times"
                    if not revision:
                        revision = w
                    else:
                        revision += " " + w

                return revision

            return findUnaryMultiplications(findConstantMultiplications(inp))

        return findImplicitMultiplications(inp)

    @staticmethod
    def _calculate(numbers, symbols):
        """Calculates a final value given a set of numbers and symbols.

        Args:
            numbers: List of numeric operands (len(symbols) + 1 entries).
            symbols: List of binary operator callables, in textual order.

        Returns:
            The result of reducing the expression by operator precedence,
            or None if no symbol matches any precedence group.
        """
        # Fixed: was ``len(numbers) is 1`` -- identity comparison with an
        # int literal is implementation-dependent; use equality.
        if len(numbers) == 1:
            return numbers[0]

        precedence = [[pow], [mul, div], [add, sub]]

        # Find most important operation
        for op_group in precedence:
            for i, op in enumerate(symbols):
                if op in op_group:
                    # Apply operation, then recurse on the reduced lists.
                    a = numbers[i]
                    b = numbers[i + 1]
                    result = MathService._applyBinary(a, b, op)
                    new_numbers = numbers[:i] + [result] + numbers[i + 2:]
                    new_symbols = symbols[:i] + symbols[i + 1:]

                    return MathService._calculate(new_numbers, new_symbols)

    def parseEquation(self, inp):
        """Solves the equation specified by the input string.

        Args:
            inp (str): An equation, specified in words, containing some
                combination of numbers, binary, and unary operations.

        Returns:
            The floating-point result of carrying out the computation.
        """
        inp = MathService._preprocess(inp)
        split = inp.split(' ')

        # Recursive call on unary operators: evaluate everything to the
        # right of the operator, apply it, and re-parse.
        for i, w in enumerate(split):
            if w in self.__unaryOperators__:
                op = self.__unaryOperators__[w]

                # Split equation into halves
                eq1 = ' '.join(split[:i])
                eq2 = ' '.join(split[i + 1:])

                # Calculate second half
                result = MathService._applyUnary(self.parseEquation(eq2), op)

                return self.parseEquation(eq1 + " " + str(result))

        def extractNumbersAndSymbols(inp):
            numbers = []
            symbols = []

            # Divide into values (numbers), operators (symbols)
            next_number = ""
            for w in inp.split(' '):
                if w in self.__binaryOperators__:
                    symbols.append(self.__binaryOperators__[w])

                    if next_number:
                        numbers.append(next_number)
                        next_number = ""
                else:
                    if next_number:
                        next_number += " "
                    next_number += w

            if next_number:
                numbers.append(next_number)

            # Cast numbers from words to integers
            def convert(n):
                if n in self.__constants__:
                    return self.__constants__[n]

                converter = NumberService()
                return converter.parse(n)

            numbers = [convert(n) for n in numbers]

            return numbers, symbols

        numbers, symbols = extractNumbersAndSymbols(inp)

        return MathService._calculate(numbers, symbols)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.