repository_name
stringclasses
316 values
func_path_in_repository
stringlengths
6
223
func_name
stringlengths
1
134
language
stringclasses
1 value
func_code_string
stringlengths
57
65.5k
func_documentation_string
stringlengths
1
46.3k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
called_functions
listlengths
1
156
enclosing_scope
stringlengths
2
1.48M
saltstack/salt
salt/modules/nilrt_ip.py
get_interfaces_details
python
def get_interfaces_details():
    '''
    Get details about all the interfaces on the minion

    :return: information about all interfaces omitting loopback
    :rtype: dictionary

    CLI Example:

    .. code-block:: bash

        salt '*' ip.get_interfaces_details
    '''
    # Skip loopback devices; every other interface is reported.
    ifaces = [dev for dev in pyiface.getIfaces() if not dev.flags & IFF_LOOPBACK]
    if __grains__['lsb_distrib_id'] == 'nilrt':
        # Legacy distro: interface settings come from the ini configuration.
        return {'interfaces': [_get_interface_info(dev) for dev in ifaces]}
    # Newer distros are connman-managed: keep only interfaces that map to a service.
    managed = [dev for dev in ifaces if _interface_to_service(dev.name) is not None]
    return {'interfaces': [_get_info(dev) for dev in managed]}
Get details about all the interfaces on the minion :return: information about all interfaces omitting loopback :rtype: dictionary CLI Example: .. code-block:: bash salt '*' ip.get_interfaces_details
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nilrt_ip.py#L494-L512
null
# -*- coding: utf-8 -*- ''' The networking module for NI Linux Real-Time distro ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time import os import re # Import salt libs import salt.exceptions import salt.utils.files import salt.utils.validate.net # Import 3rd-party libs # pylint: disable=import-error,redefined-builtin,no-name-in-module from salt.ext.six.moves import map, range, configparser from salt.ext import six # pylint: enable=import-error,redefined-builtin,no-name-in-module try: import pyconnman except ImportError: pyconnman = None try: import dbus except ImportError: dbus = None try: import pyiface from pyiface.ifreqioctls import IFF_LOOPBACK, IFF_RUNNING except ImportError: pyiface = None try: from requests.structures import CaseInsensitiveDict except ImportError: CaseInsensitiveDict = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'ip' SERVICE_PATH = '/net/connman/service/' INTERFACES_CONFIG = '/var/lib/connman/interfaces.config' NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg' INI_FILE = '/etc/natinst/share/ni-rt.ini' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] NIRTCFG_ETHERCAT = 'EtherCAT' def _assume_condition(condition, err): ''' Raise an exception if the condition is false ''' if not condition: raise RuntimeError(err) def __virtual__(): ''' Confine this module to NI Linux Real-Time based distros ''' try: msg = 'The nilrt_ip module could not be loaded: unsupported OS family' _assume_condition(__grains__['os_family'] == 'NILinuxRT', msg) _assume_condition(CaseInsensitiveDict, 'The python package request is not installed') _assume_condition(pyiface, 'The python pyiface package is not installed') if __grains__['lsb_distrib_id'] != 'nilrt': _assume_condition(pyconnman, 'The python package pyconnman is not installed') _assume_condition(dbus, 'The python DBus package is not installed') _assume_condition(_get_state() != 'offline', 
'Connman is not running') except RuntimeError as exc: return False, str(exc) return __virtualname__ def _get_state(): ''' Returns the state of connman ''' try: return pyconnman.ConnManager().get_property('State') except KeyError: return 'offline' except dbus.DBusException as exc: raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc)) def _get_technologies(): ''' Returns the technologies of connman ''' tech = '' technologies = pyconnman.ConnManager().get_technologies() for path, params in technologies: tech += '{0}\n\tName = {1}\n\tType = {2}\n\tPowered = {3}\n\tConnected = {4}\n'.format( path, params['Name'], params['Type'], params['Powered'] == 1, params['Connected'] == 1) return tech def _get_services(): ''' Returns a list with all connman services ''' serv = [] services = pyconnman.ConnManager().get_services() for path, _ in services: serv.append(six.text_type(path[len(SERVICE_PATH):])) return serv def _connected(service): ''' Verify if a connman service is connected ''' state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State') return state == 'online' or state == 'ready' def _space_delimited_list(value): ''' validate that a value contains one or more space-delimited values ''' if isinstance(value, six.string_types): items = value.split(' ') valid = items and all(items) else: valid = hasattr(value, '__iter__') and (value != []) if valid: return True, 'space-delimited string' return False, '{0} is not a valid list.\n'.format(value) def _validate_ipv4(value): ''' validate ipv4 values ''' if len(value) == 3: if not salt.utils.validate.net.ipv4_addr(value[0].strip()): return False, 'Invalid ip address: {0} for ipv4 option'.format(value[0]) if not salt.utils.validate.net.netmask(value[1].strip()): return False, 'Invalid netmask: {0} for ipv4 option'.format(value[1]) if not salt.utils.validate.net.ipv4_addr(value[2].strip()): return False, 'Invalid gateway: {0} for ipv4 option'.format(value[2]) else: return 
False, 'Invalid value: {0} for ipv4 option'.format(value) return True, '' def _interface_to_service(iface): ''' returns the coresponding service to given interface if exists, otherwise return None ''' for _service in _get_services(): service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service)) if service_info.get_property('Ethernet')['Interface'] == iface: return _service return None def _get_service_info(service): ''' return details about given connman service ''' service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) data = { 'label': service, 'wireless': service_info.get_property('Type') == 'wifi', 'connectionid': six.text_type(service_info.get_property('Ethernet')['Interface']), 'hwaddr': six.text_type(service_info.get_property('Ethernet')['Address']) } state = service_info.get_property('State') if state == 'ready' or state == 'online': data['up'] = True data['ipv4'] = { 'gateway': '0.0.0.0' } ipv4 = 'IPv4' if service_info.get_property('IPv4')['Method'] == 'manual': ipv4 += '.Configuration' ipv4_info = service_info.get_property(ipv4) for info in ['Method', 'Address', 'Netmask', 'Gateway']: value = ipv4_info.get(info) if value is None: log.warning('Unable to get IPv4 %s for service %s\n', info, service) continue if info == 'Method': info = 'requestmode' if value == 'dhcp': value = 'dhcp_linklocal' elif value in ('manual', 'fixed'): value = 'static' data['ipv4'][info.lower()] = six.text_type(value) ipv6_info = service_info.get_property('IPv6') for info in ['Address', 'Prefix', 'Gateway']: value = ipv6_info.get(info) if value is None: log.warning('Unable to get IPv6 %s for service %s\n', info, service) continue if 'ipv6' not in data: data['ipv6'] = {} data['ipv6'][info.lower()] = [six.text_type(value)] nameservers = [] for nameserver_prop in service_info.get_property('Nameservers'): nameservers.append(six.text_type(nameserver_prop)) data['ipv4']['dns'] = nameservers else: data['up'] = False data['ipv4'] = { 'requestmode': 
'disabled' } data['ipv4']['supportedrequestmodes'] = [ 'static', 'dhcp_linklocal', 'disabled' ] return data def _get_dns_info(): ''' return dns list ''' dns_list = [] try: with salt.utils.files.fopen('/etc/resolv.conf', 'r+') as dns_info: lines = dns_info.readlines() for line in lines: if 'nameserver' in line: dns = line.split()[1].strip() if dns not in dns_list: dns_list.append(dns) except IOError: log.warning('Could not get domain\n') return dns_list def _remove_quotes(value): ''' Remove leading and trailing double quotes if they exist. ''' # nirtcfg writes values with quotes if len(value) > 1 and value[0] == value[-1] == '\"': value = value[1:-1] return value def _load_config(section, options, default_value='', filename=INI_FILE): ''' Get values for some options and a given section from a config file. :param section: Section Name :param options: List of options :param default_value: Default value if an option doesn't have a value. Default is empty string. :param filename: config file. Default is INI_FILE. 
:return: ''' results = {} if not options: return results with salt.utils.files.fopen(filename, 'r') as config_file: config_parser = configparser.RawConfigParser(dict_type=CaseInsensitiveDict) config_parser.readfp(config_file) for option in options: results[option] = _remove_quotes(config_parser.get(section, option)) \ if config_parser.has_option(section, option) else default_value return results def _get_request_mode_info(interface): ''' return requestmode for given interface ''' settings = _load_config(interface, ['linklocalenabled', 'dhcpenabled'], -1) link_local_enabled = int(settings['linklocalenabled']) dhcp_enabled = int(settings['dhcpenabled']) if dhcp_enabled == 1: return 'dhcp_linklocal' if link_local_enabled == 1 else 'dhcp_only' else: if link_local_enabled == 1: return 'linklocal_only' if link_local_enabled == 0: return 'static' # some versions of nirtcfg don't set the dhcpenabled/linklocalenabled variables # when selecting "DHCP or Link Local" from MAX, so return it by default to avoid # having the requestmode "None" because none of the conditions above matched. return 'dhcp_linklocal' def _get_adapter_mode_info(interface): ''' return adaptermode for given interface ''' mode = _load_config(interface, ['mode'])['mode'].lower() return mode if mode in ['disabled', 'ethercat'] else 'tcpip' def _get_possible_adapter_modes(interface, blacklist): ''' Return possible adapter modes for a given interface using a blacklist. 
:param interface: interface name :param blacklist: given blacklist :return: list of possible adapter modes ''' adapter_modes = [] protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower() sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface)) with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file: uevent_lines = uevent_file.readlines() uevent_devtype = "" for line in uevent_lines: if line.startswith("DEVTYPE="): uevent_devtype = line.split('=')[1].strip() break for adapter_mode in blacklist: if adapter_mode == '_': continue value = blacklist.get(adapter_mode, {}) if value.get('additional_protocol') and adapter_mode not in protocols: continue if interface not in value['name'] \ and not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path) or (blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype) for iface_type in value['type']): adapter_modes += [adapter_mode] return adapter_modes def _get_static_info(interface): ''' Return information about an interface from config file. 
:param interface: interface label ''' data = { 'connectionid': interface.name, 'label': interface.name, 'hwaddr': interface.hwaddr[:-1], 'up': False, 'ipv4': { 'supportedrequestmodes': ['static', 'dhcp_linklocal', 'disabled'], 'requestmode': 'static' }, 'wireless': False } hwaddr_section_number = ''.join(data['hwaddr'].split(':')) if os.path.exists(INTERFACES_CONFIG): information = _load_config(hwaddr_section_number, ['IPv4', 'Nameservers'], filename=INTERFACES_CONFIG) if information['IPv4'] != '': ipv4_information = information['IPv4'].split('/') data['ipv4']['address'] = ipv4_information[0] data['ipv4']['dns'] = information['Nameservers'].split(',') data['ipv4']['netmask'] = ipv4_information[1] data['ipv4']['gateway'] = ipv4_information[2] return data def _get_base_interface_info(interface): ''' return base details about given interface ''' blacklist = { 'tcpip': { 'name': [], 'type': [], 'additional_protocol': False }, 'disabled': { 'name': ['eth0'], 'type': ['gadget'], 'additional_protocol': False }, 'ethercat': { 'name': ['eth0'], 'type': ['gadget', 'usb', 'wlan'], 'additional_protocol': True }, '_': { 'usb': 'sys', 'gadget': 'uevent', 'wlan': 'uevent' } } return { 'label': interface.name, 'connectionid': interface.name, 'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist), 'adapter_mode': _get_adapter_mode_info(interface.name), 'up': interface.flags & IFF_RUNNING != 0, 'ipv4': { 'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'], 'requestmode': _get_request_mode_info(interface.name) }, 'hwaddr': interface.hwaddr[:-1] } def _get_ethercat_interface_info(interface): ''' return details about given ethercat interface ''' base_information = _get_base_interface_info(interface) base_information['ethercat'] = { 'masterid': _load_config(interface.name, ['MasterID'])['MasterID'] } return base_information def _get_tcpip_interface_info(interface): ''' return details about given tcpip interface ''' 
base_information = _get_base_interface_info(interface) if base_information['ipv4']['requestmode'] == 'static': settings = _load_config(interface.name, ['IP_Address', 'Subnet_Mask', 'Gateway', 'DNS_Address']) base_information['ipv4']['address'] = settings['IP_Address'] base_information['ipv4']['netmask'] = settings['Subnet_Mask'] base_information['ipv4']['gateway'] = settings['Gateway'] base_information['ipv4']['dns'] = [settings['DNS_Address']] elif base_information['up']: base_information['ipv4']['address'] = interface.sockaddrToStr(interface.addr) base_information['ipv4']['netmask'] = interface.sockaddrToStr(interface.netmask) base_information['ipv4']['gateway'] = '0.0.0.0' base_information['ipv4']['dns'] = _get_dns_info() with salt.utils.files.fopen('/proc/net/route', 'r') as route_file: pattern = re.compile(r'^{interface}\t[0]{{8}}\t([0-9A-Z]{{8}})'.format(interface=interface.name), re.MULTILINE) match = pattern.search(route_file.read()) iface_gateway_hex = None if not match else match.group(1) if iface_gateway_hex is not None and len(iface_gateway_hex) == 8: base_information['ipv4']['gateway'] = '.'.join([str(int(iface_gateway_hex[i:i + 2], 16)) for i in range(6, -1, -2)]) return base_information def _get_interface_info(interface): ''' return details about given interface ''' adapter_mode = _get_adapter_mode_info(interface.name) if adapter_mode == 'disabled': return _get_base_interface_info(interface) elif adapter_mode == 'ethercat': return _get_ethercat_interface_info(interface) return _get_tcpip_interface_info(interface) def _dict_to_string(dictionary): ''' converts a dictionary object into a list of strings ''' ret = '' for key, val in sorted(dictionary.items()): if isinstance(val, dict): for line in _dict_to_string(val): ret += six.text_type(key) + '-' + line + '\n' elif isinstance(val, list): text = ' '.join([six.text_type(item) for item in val]) ret += six.text_type(key) + ': ' + text + '\n' else: ret += six.text_type(key) + ': ' + six.text_type(val) + 
'\n' return ret.splitlines() def _get_info(interface): ''' Return information about an interface if it's associated with a service. :param interface: interface label ''' service = _interface_to_service(interface.name) return _get_service_info(service) def _change_state_legacy(interface, new_state): ''' Enable or disable an interface on a legacy distro Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP' if new_state == 'up' else 'Disabled') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: out = __salt__['cmd.run_all']('ip link set {0} {1}'.format(interface, new_state)) if out['retcode'] != 0: msg = 'Couldn\'t {0} interface {1}. Error: {2}'.format('enable' if new_state == 'up' else 'disable', interface, out['stderr']) raise salt.exceptions.CommandExecutionError(msg) return True def _change_state(interface, new_state): ''' Enable or disable an interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. 
:rtype: bool ''' if __grains__['lsb_distrib_id'] == 'nilrt': return _change_state_legacy(interface, new_state) service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) connected = _connected(service) if (not connected and new_state == 'up') or (connected and new_state == 'down'): service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) try: state = service.connect() if new_state == 'up' else service.disconnect() return state is None except Exception: raise salt.exceptions.CommandExecutionError('Couldn\'t {0} service: {1}\n' .format('enable' if new_state == 'up' else 'disable', service)) return True def up(interface, iface_type=None): # pylint: disable=invalid-name,unused-argument ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.up interface-label ''' return _change_state(interface, 'up') def enable(interface): ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.enable interface-label ''' return up(interface) def down(interface, iface_type=None): # pylint: disable=unused-argument ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.down interface-label ''' return _change_state(interface, 'down') def disable(interface): ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.disable interface-label ''' return down(interface) def _save_config(section, token, value): ''' Helper function to persist a configuration in the ini file ''' cmd = NIRTCFG_PATH cmd += ' --set section={0},token=\'{1}\',value=\'{2}\''.format(section, token, value) if __salt__['cmd.run_all'](cmd)['retcode'] != 0: exc_msg = 'Error: could not set {} to {} for {}\n'.format(token, value, section) raise salt.exceptions.CommandExecutionError(exc_msg) def set_ethercat(interface, master_id): ''' Configure specified adapter to use EtherCAT adapter mode. If successful, the target will need reboot if it doesn't already use EtherCAT adapter mode, otherwise will return true. :param interface: interface label :param master_id: EtherCAT Master ID :return: True if the settings were applied, otherwise an exception will be thrown. CLI Example: .. code-block:: bash salt '*' ip.set_ethercat interface-label master-id ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', NIRTCFG_ETHERCAT) _save_config(interface, 'MasterID', master_id) if initial_mode != 'ethercat': __salt__['system.set_reboot_required_witnessed']() return True raise salt.exceptions.CommandExecutionError('EtherCAT is not supported') def _restart(interface): ''' Disable and enable an interface ''' disable(interface) enable(interface) def set_dhcp_linklocal_all(interface): ''' Configure specified adapter to use DHCP with linklocal fallback Change adapter mode to TCP/IP. 
If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_dhcp_linklocal_all interface-label ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('dhcp', variant_level=1) ipv4['Address'] = dbus.String('', variant_level=1) ipv4['Netmask'] = dbus.String('', variant_level=1) ipv4['Gateway'] = dbus.String('', variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) service.set_property('Nameservers.Configuration', ['']) # reset nameservers list except Exception as exc: exc_msg = 'Couldn\'t set dhcp linklocal for service: {0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def set_dhcp_only_all(interface): ''' Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.dhcp_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '0') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def set_linklocal_only_all(interface): ''' Configure specified adapter to use linklocal only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.linklocal_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def _configure_static_interface(interface, **settings): ''' Configure an interface that is not detected as a service by Connman (i.e. link is down) :param interface: interface label :param settings: - ip - netmask - gateway - dns - name :return: True if settings were applied successfully. 
:rtype: bool ''' interface = pyiface.Interface(name=interface) parser = configparser.ConfigParser() if os.path.exists(INTERFACES_CONFIG): try: with salt.utils.files.fopen(INTERFACES_CONFIG, 'r') as config_file: parser.readfp(config_file) except configparser.MissingSectionHeaderError: pass hwaddr = interface.hwaddr[:-1] hwaddr_section_number = ''.join(hwaddr.split(':')) if not parser.has_section('interface_{0}'.format(hwaddr_section_number)): parser.add_section('interface_{0}'.format(hwaddr_section_number)) ip_address = settings.get('ip', '0.0.0.0') netmask = settings.get('netmask', '0.0.0.0') gateway = settings.get('gateway', '0.0.0.0') dns_servers = settings.get('dns', '') name = settings.get('name', 'ethernet_cable_{0}'.format(hwaddr_section_number)) parser.set('interface_{0}'.format(hwaddr_section_number), 'IPv4', '{0}/{1}/{2}'. format(ip_address, netmask, gateway)) parser.set('interface_{0}'.format(hwaddr_section_number), 'Nameservers', dns_servers) parser.set('interface_{0}'.format(hwaddr_section_number), 'Name', name) parser.set('interface_{0}'.format(hwaddr_section_number), 'MAC', hwaddr) parser.set('interface_{0}'.format(hwaddr_section_number), 'Type', 'ethernet') with salt.utils.files.fopen(INTERFACES_CONFIG, 'w') as config_file: parser.write(config_file) return True def set_static_all(interface, address, netmask, gateway, nameservers=None): ''' Configure specified adapter to use ipv4 manual settings Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :param str address: ipv4 address :param str netmask: ipv4 netmask :param str gateway: ipv4 gateway :param str nameservers: list of nameservers servers separated by spaces (Optional) :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.set_static_all interface-label address netmask gateway nameservers ''' validate, msg = _validate_ipv4([address, netmask, gateway]) if not validate: raise salt.exceptions.CommandExecutionError(msg) if nameservers: validate, msg = _space_delimited_list(nameservers) if not validate: raise salt.exceptions.CommandExecutionError(msg) if not isinstance(nameservers, list): nameservers = nameservers.split(' ') if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '0') _save_config(interface, 'IP_Address', address) _save_config(interface, 'Subnet_Mask', netmask) _save_config(interface, 'Gateway', gateway) if nameservers: _save_config(interface, 'DNS_Address', nameservers[0]) if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: if interface in pyiface.getIfaces(): return _configure_static_interface(interface, **{'ip': address, 'dns': ','.join(nameservers) if nameservers else '', 'netmask': netmask, 'gateway': gateway}) raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('manual', variant_level=1) ipv4['Address'] = dbus.String('{0}'.format(address), variant_level=1) ipv4['Netmask'] = dbus.String('{0}'.format(netmask), variant_level=1) ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) if nameservers: service.set_property('Nameservers.Configuration', [dbus.String('{0}'.format(d)) for d in nameservers]) except Exception as exc: exc_msg = 'Couldn\'t set manual settings for service: {0}\nError: 
{1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def get_interface(iface): ''' Returns details about given interface. CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0 ''' _interfaces = get_interfaces_details() for _interface in _interfaces['interfaces']: if _interface['connectionid'] == iface: return _dict_to_string(_interface) return None def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if iface_type != 'eth': raise salt.exceptions.CommandExecutionError('Interface type not supported: {0}:'.format(iface_type)) if 'proto' not in settings or settings['proto'] == 'dhcp': # default protocol type used is dhcp set_dhcp_linklocal_all(iface) elif settings['proto'] != 'static': exc_msg = 'Protocol type: {0} is not supported'.format(settings['proto']) raise salt.exceptions.CommandExecutionError(exc_msg) else: address = settings['ipaddr'] netmask = settings['netmask'] gateway = settings['gateway'] dns = [] for key, val in six.iteritems(settings): if 'dns' in key or 'domain' in key: dns += val set_static_all(iface, address, netmask, gateway, dns) if enabled: up(iface) return get_interface(iface) def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') changes = [] if 'networking' in settings: if settings['networking'] in _CONFIG_TRUE: __salt__['service.enable']('connman') else: __salt__['service.disable']('connman') if 'hostname' in settings: new_hostname = settings['hostname'].split('.', 1)[0] settings['hostname'] = new_hostname old_hostname = __salt__['network.get_hostname'] if new_hostname != old_hostname: __salt__['network.mod_hostname'](new_hostname) changes.append('hostname={0}'.format(new_hostname)) return changes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') settings = [] networking = 'no' if _get_state() == 'offline' else 'yes' settings.append('networking={0}'.format(networking)) hostname = __salt__['network.get_hostname'] settings.append('hostname={0}'.format(hostname)) return settings def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' 
) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: stop = __salt__['service.stop']('connman') time.sleep(2) res = stop and __salt__['service.start']('connman') return hostname_res and res
saltstack/salt
salt/modules/nilrt_ip.py
_change_state_legacy
python
def _change_state_legacy(interface, new_state): ''' Enable or disable an interface on a legacy distro Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP' if new_state == 'up' else 'Disabled') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: out = __salt__['cmd.run_all']('ip link set {0} {1}'.format(interface, new_state)) if out['retcode'] != 0: msg = 'Couldn\'t {0} interface {1}. Error: {2}'.format('enable' if new_state == 'up' else 'disable', interface, out['stderr']) raise salt.exceptions.CommandExecutionError(msg) return True
Enable or disable an interface on a legacy distro Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nilrt_ip.py#L515-L536
null
# -*- coding: utf-8 -*- ''' The networking module for NI Linux Real-Time distro ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time import os import re # Import salt libs import salt.exceptions import salt.utils.files import salt.utils.validate.net # Import 3rd-party libs # pylint: disable=import-error,redefined-builtin,no-name-in-module from salt.ext.six.moves import map, range, configparser from salt.ext import six # pylint: enable=import-error,redefined-builtin,no-name-in-module try: import pyconnman except ImportError: pyconnman = None try: import dbus except ImportError: dbus = None try: import pyiface from pyiface.ifreqioctls import IFF_LOOPBACK, IFF_RUNNING except ImportError: pyiface = None try: from requests.structures import CaseInsensitiveDict except ImportError: CaseInsensitiveDict = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'ip' SERVICE_PATH = '/net/connman/service/' INTERFACES_CONFIG = '/var/lib/connman/interfaces.config' NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg' INI_FILE = '/etc/natinst/share/ni-rt.ini' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] NIRTCFG_ETHERCAT = 'EtherCAT' def _assume_condition(condition, err): ''' Raise an exception if the condition is false ''' if not condition: raise RuntimeError(err) def __virtual__(): ''' Confine this module to NI Linux Real-Time based distros ''' try: msg = 'The nilrt_ip module could not be loaded: unsupported OS family' _assume_condition(__grains__['os_family'] == 'NILinuxRT', msg) _assume_condition(CaseInsensitiveDict, 'The python package request is not installed') _assume_condition(pyiface, 'The python pyiface package is not installed') if __grains__['lsb_distrib_id'] != 'nilrt': _assume_condition(pyconnman, 'The python package pyconnman is not installed') _assume_condition(dbus, 'The python DBus package is not installed') _assume_condition(_get_state() != 'offline', 
'Connman is not running') except RuntimeError as exc: return False, str(exc) return __virtualname__ def _get_state(): ''' Returns the state of connman ''' try: return pyconnman.ConnManager().get_property('State') except KeyError: return 'offline' except dbus.DBusException as exc: raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc)) def _get_technologies(): ''' Returns the technologies of connman ''' tech = '' technologies = pyconnman.ConnManager().get_technologies() for path, params in technologies: tech += '{0}\n\tName = {1}\n\tType = {2}\n\tPowered = {3}\n\tConnected = {4}\n'.format( path, params['Name'], params['Type'], params['Powered'] == 1, params['Connected'] == 1) return tech def _get_services(): ''' Returns a list with all connman services ''' serv = [] services = pyconnman.ConnManager().get_services() for path, _ in services: serv.append(six.text_type(path[len(SERVICE_PATH):])) return serv def _connected(service): ''' Verify if a connman service is connected ''' state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State') return state == 'online' or state == 'ready' def _space_delimited_list(value): ''' validate that a value contains one or more space-delimited values ''' if isinstance(value, six.string_types): items = value.split(' ') valid = items and all(items) else: valid = hasattr(value, '__iter__') and (value != []) if valid: return True, 'space-delimited string' return False, '{0} is not a valid list.\n'.format(value) def _validate_ipv4(value): ''' validate ipv4 values ''' if len(value) == 3: if not salt.utils.validate.net.ipv4_addr(value[0].strip()): return False, 'Invalid ip address: {0} for ipv4 option'.format(value[0]) if not salt.utils.validate.net.netmask(value[1].strip()): return False, 'Invalid netmask: {0} for ipv4 option'.format(value[1]) if not salt.utils.validate.net.ipv4_addr(value[2].strip()): return False, 'Invalid gateway: {0} for ipv4 option'.format(value[2]) else: return 
False, 'Invalid value: {0} for ipv4 option'.format(value) return True, '' def _interface_to_service(iface): ''' returns the coresponding service to given interface if exists, otherwise return None ''' for _service in _get_services(): service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service)) if service_info.get_property('Ethernet')['Interface'] == iface: return _service return None def _get_service_info(service): ''' return details about given connman service ''' service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) data = { 'label': service, 'wireless': service_info.get_property('Type') == 'wifi', 'connectionid': six.text_type(service_info.get_property('Ethernet')['Interface']), 'hwaddr': six.text_type(service_info.get_property('Ethernet')['Address']) } state = service_info.get_property('State') if state == 'ready' or state == 'online': data['up'] = True data['ipv4'] = { 'gateway': '0.0.0.0' } ipv4 = 'IPv4' if service_info.get_property('IPv4')['Method'] == 'manual': ipv4 += '.Configuration' ipv4_info = service_info.get_property(ipv4) for info in ['Method', 'Address', 'Netmask', 'Gateway']: value = ipv4_info.get(info) if value is None: log.warning('Unable to get IPv4 %s for service %s\n', info, service) continue if info == 'Method': info = 'requestmode' if value == 'dhcp': value = 'dhcp_linklocal' elif value in ('manual', 'fixed'): value = 'static' data['ipv4'][info.lower()] = six.text_type(value) ipv6_info = service_info.get_property('IPv6') for info in ['Address', 'Prefix', 'Gateway']: value = ipv6_info.get(info) if value is None: log.warning('Unable to get IPv6 %s for service %s\n', info, service) continue if 'ipv6' not in data: data['ipv6'] = {} data['ipv6'][info.lower()] = [six.text_type(value)] nameservers = [] for nameserver_prop in service_info.get_property('Nameservers'): nameservers.append(six.text_type(nameserver_prop)) data['ipv4']['dns'] = nameservers else: data['up'] = False data['ipv4'] = { 'requestmode': 
'disabled' } data['ipv4']['supportedrequestmodes'] = [ 'static', 'dhcp_linklocal', 'disabled' ] return data def _get_dns_info(): ''' return dns list ''' dns_list = [] try: with salt.utils.files.fopen('/etc/resolv.conf', 'r+') as dns_info: lines = dns_info.readlines() for line in lines: if 'nameserver' in line: dns = line.split()[1].strip() if dns not in dns_list: dns_list.append(dns) except IOError: log.warning('Could not get domain\n') return dns_list def _remove_quotes(value): ''' Remove leading and trailing double quotes if they exist. ''' # nirtcfg writes values with quotes if len(value) > 1 and value[0] == value[-1] == '\"': value = value[1:-1] return value def _load_config(section, options, default_value='', filename=INI_FILE): ''' Get values for some options and a given section from a config file. :param section: Section Name :param options: List of options :param default_value: Default value if an option doesn't have a value. Default is empty string. :param filename: config file. Default is INI_FILE. 
:return: ''' results = {} if not options: return results with salt.utils.files.fopen(filename, 'r') as config_file: config_parser = configparser.RawConfigParser(dict_type=CaseInsensitiveDict) config_parser.readfp(config_file) for option in options: results[option] = _remove_quotes(config_parser.get(section, option)) \ if config_parser.has_option(section, option) else default_value return results def _get_request_mode_info(interface): ''' return requestmode for given interface ''' settings = _load_config(interface, ['linklocalenabled', 'dhcpenabled'], -1) link_local_enabled = int(settings['linklocalenabled']) dhcp_enabled = int(settings['dhcpenabled']) if dhcp_enabled == 1: return 'dhcp_linklocal' if link_local_enabled == 1 else 'dhcp_only' else: if link_local_enabled == 1: return 'linklocal_only' if link_local_enabled == 0: return 'static' # some versions of nirtcfg don't set the dhcpenabled/linklocalenabled variables # when selecting "DHCP or Link Local" from MAX, so return it by default to avoid # having the requestmode "None" because none of the conditions above matched. return 'dhcp_linklocal' def _get_adapter_mode_info(interface): ''' return adaptermode for given interface ''' mode = _load_config(interface, ['mode'])['mode'].lower() return mode if mode in ['disabled', 'ethercat'] else 'tcpip' def _get_possible_adapter_modes(interface, blacklist): ''' Return possible adapter modes for a given interface using a blacklist. 
:param interface: interface name :param blacklist: given blacklist :return: list of possible adapter modes ''' adapter_modes = [] protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower() sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface)) with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file: uevent_lines = uevent_file.readlines() uevent_devtype = "" for line in uevent_lines: if line.startswith("DEVTYPE="): uevent_devtype = line.split('=')[1].strip() break for adapter_mode in blacklist: if adapter_mode == '_': continue value = blacklist.get(adapter_mode, {}) if value.get('additional_protocol') and adapter_mode not in protocols: continue if interface not in value['name'] \ and not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path) or (blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype) for iface_type in value['type']): adapter_modes += [adapter_mode] return adapter_modes def _get_static_info(interface): ''' Return information about an interface from config file. 
:param interface: interface label ''' data = { 'connectionid': interface.name, 'label': interface.name, 'hwaddr': interface.hwaddr[:-1], 'up': False, 'ipv4': { 'supportedrequestmodes': ['static', 'dhcp_linklocal', 'disabled'], 'requestmode': 'static' }, 'wireless': False } hwaddr_section_number = ''.join(data['hwaddr'].split(':')) if os.path.exists(INTERFACES_CONFIG): information = _load_config(hwaddr_section_number, ['IPv4', 'Nameservers'], filename=INTERFACES_CONFIG) if information['IPv4'] != '': ipv4_information = information['IPv4'].split('/') data['ipv4']['address'] = ipv4_information[0] data['ipv4']['dns'] = information['Nameservers'].split(',') data['ipv4']['netmask'] = ipv4_information[1] data['ipv4']['gateway'] = ipv4_information[2] return data def _get_base_interface_info(interface): ''' return base details about given interface ''' blacklist = { 'tcpip': { 'name': [], 'type': [], 'additional_protocol': False }, 'disabled': { 'name': ['eth0'], 'type': ['gadget'], 'additional_protocol': False }, 'ethercat': { 'name': ['eth0'], 'type': ['gadget', 'usb', 'wlan'], 'additional_protocol': True }, '_': { 'usb': 'sys', 'gadget': 'uevent', 'wlan': 'uevent' } } return { 'label': interface.name, 'connectionid': interface.name, 'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist), 'adapter_mode': _get_adapter_mode_info(interface.name), 'up': interface.flags & IFF_RUNNING != 0, 'ipv4': { 'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'], 'requestmode': _get_request_mode_info(interface.name) }, 'hwaddr': interface.hwaddr[:-1] } def _get_ethercat_interface_info(interface): ''' return details about given ethercat interface ''' base_information = _get_base_interface_info(interface) base_information['ethercat'] = { 'masterid': _load_config(interface.name, ['MasterID'])['MasterID'] } return base_information def _get_tcpip_interface_info(interface): ''' return details about given tcpip interface ''' 
base_information = _get_base_interface_info(interface) if base_information['ipv4']['requestmode'] == 'static': settings = _load_config(interface.name, ['IP_Address', 'Subnet_Mask', 'Gateway', 'DNS_Address']) base_information['ipv4']['address'] = settings['IP_Address'] base_information['ipv4']['netmask'] = settings['Subnet_Mask'] base_information['ipv4']['gateway'] = settings['Gateway'] base_information['ipv4']['dns'] = [settings['DNS_Address']] elif base_information['up']: base_information['ipv4']['address'] = interface.sockaddrToStr(interface.addr) base_information['ipv4']['netmask'] = interface.sockaddrToStr(interface.netmask) base_information['ipv4']['gateway'] = '0.0.0.0' base_information['ipv4']['dns'] = _get_dns_info() with salt.utils.files.fopen('/proc/net/route', 'r') as route_file: pattern = re.compile(r'^{interface}\t[0]{{8}}\t([0-9A-Z]{{8}})'.format(interface=interface.name), re.MULTILINE) match = pattern.search(route_file.read()) iface_gateway_hex = None if not match else match.group(1) if iface_gateway_hex is not None and len(iface_gateway_hex) == 8: base_information['ipv4']['gateway'] = '.'.join([str(int(iface_gateway_hex[i:i + 2], 16)) for i in range(6, -1, -2)]) return base_information def _get_interface_info(interface): ''' return details about given interface ''' adapter_mode = _get_adapter_mode_info(interface.name) if adapter_mode == 'disabled': return _get_base_interface_info(interface) elif adapter_mode == 'ethercat': return _get_ethercat_interface_info(interface) return _get_tcpip_interface_info(interface) def _dict_to_string(dictionary): ''' converts a dictionary object into a list of strings ''' ret = '' for key, val in sorted(dictionary.items()): if isinstance(val, dict): for line in _dict_to_string(val): ret += six.text_type(key) + '-' + line + '\n' elif isinstance(val, list): text = ' '.join([six.text_type(item) for item in val]) ret += six.text_type(key) + ': ' + text + '\n' else: ret += six.text_type(key) + ': ' + six.text_type(val) + 
'\n' return ret.splitlines() def _get_info(interface): ''' Return information about an interface if it's associated with a service. :param interface: interface label ''' service = _interface_to_service(interface.name) return _get_service_info(service) def get_interfaces_details(): ''' Get details about all the interfaces on the minion :return: information about all interfaces omitting loopback :rtype: dictionary CLI Example: .. code-block:: bash salt '*' ip.get_interfaces_details ''' _interfaces = [interface for interface in pyiface.getIfaces() if interface.flags & IFF_LOOPBACK == 0] if __grains__['lsb_distrib_id'] == 'nilrt': return {'interfaces': list(map(_get_interface_info, _interfaces))} # filter just the services _interfaces = [interface for interface in _interfaces if _interface_to_service(interface.name) is not None] return {'interfaces': list(map(_get_info, _interfaces))} def _change_state(interface, new_state): ''' Enable or disable an interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. 
:rtype: bool ''' if __grains__['lsb_distrib_id'] == 'nilrt': return _change_state_legacy(interface, new_state) service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) connected = _connected(service) if (not connected and new_state == 'up') or (connected and new_state == 'down'): service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) try: state = service.connect() if new_state == 'up' else service.disconnect() return state is None except Exception: raise salt.exceptions.CommandExecutionError('Couldn\'t {0} service: {1}\n' .format('enable' if new_state == 'up' else 'disable', service)) return True def up(interface, iface_type=None): # pylint: disable=invalid-name,unused-argument ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.up interface-label ''' return _change_state(interface, 'up') def enable(interface): ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.enable interface-label ''' return up(interface) def down(interface, iface_type=None): # pylint: disable=unused-argument ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.down interface-label ''' return _change_state(interface, 'down') def disable(interface): ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.disable interface-label ''' return down(interface) def _save_config(section, token, value): ''' Helper function to persist a configuration in the ini file ''' cmd = NIRTCFG_PATH cmd += ' --set section={0},token=\'{1}\',value=\'{2}\''.format(section, token, value) if __salt__['cmd.run_all'](cmd)['retcode'] != 0: exc_msg = 'Error: could not set {} to {} for {}\n'.format(token, value, section) raise salt.exceptions.CommandExecutionError(exc_msg) def set_ethercat(interface, master_id): ''' Configure specified adapter to use EtherCAT adapter mode. If successful, the target will need reboot if it doesn't already use EtherCAT adapter mode, otherwise will return true. :param interface: interface label :param master_id: EtherCAT Master ID :return: True if the settings were applied, otherwise an exception will be thrown. CLI Example: .. code-block:: bash salt '*' ip.set_ethercat interface-label master-id ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', NIRTCFG_ETHERCAT) _save_config(interface, 'MasterID', master_id) if initial_mode != 'ethercat': __salt__['system.set_reboot_required_witnessed']() return True raise salt.exceptions.CommandExecutionError('EtherCAT is not supported') def _restart(interface): ''' Disable and enable an interface ''' disable(interface) enable(interface) def set_dhcp_linklocal_all(interface): ''' Configure specified adapter to use DHCP with linklocal fallback Change adapter mode to TCP/IP. 
If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_dhcp_linklocal_all interface-label ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('dhcp', variant_level=1) ipv4['Address'] = dbus.String('', variant_level=1) ipv4['Netmask'] = dbus.String('', variant_level=1) ipv4['Gateway'] = dbus.String('', variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) service.set_property('Nameservers.Configuration', ['']) # reset nameservers list except Exception as exc: exc_msg = 'Couldn\'t set dhcp linklocal for service: {0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def set_dhcp_only_all(interface): ''' Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.dhcp_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '0') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def set_linklocal_only_all(interface): ''' Configure specified adapter to use linklocal only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.linklocal_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def _configure_static_interface(interface, **settings): ''' Configure an interface that is not detected as a service by Connman (i.e. link is down) :param interface: interface label :param settings: - ip - netmask - gateway - dns - name :return: True if settings were applied successfully. 
:rtype: bool ''' interface = pyiface.Interface(name=interface) parser = configparser.ConfigParser() if os.path.exists(INTERFACES_CONFIG): try: with salt.utils.files.fopen(INTERFACES_CONFIG, 'r') as config_file: parser.readfp(config_file) except configparser.MissingSectionHeaderError: pass hwaddr = interface.hwaddr[:-1] hwaddr_section_number = ''.join(hwaddr.split(':')) if not parser.has_section('interface_{0}'.format(hwaddr_section_number)): parser.add_section('interface_{0}'.format(hwaddr_section_number)) ip_address = settings.get('ip', '0.0.0.0') netmask = settings.get('netmask', '0.0.0.0') gateway = settings.get('gateway', '0.0.0.0') dns_servers = settings.get('dns', '') name = settings.get('name', 'ethernet_cable_{0}'.format(hwaddr_section_number)) parser.set('interface_{0}'.format(hwaddr_section_number), 'IPv4', '{0}/{1}/{2}'. format(ip_address, netmask, gateway)) parser.set('interface_{0}'.format(hwaddr_section_number), 'Nameservers', dns_servers) parser.set('interface_{0}'.format(hwaddr_section_number), 'Name', name) parser.set('interface_{0}'.format(hwaddr_section_number), 'MAC', hwaddr) parser.set('interface_{0}'.format(hwaddr_section_number), 'Type', 'ethernet') with salt.utils.files.fopen(INTERFACES_CONFIG, 'w') as config_file: parser.write(config_file) return True def set_static_all(interface, address, netmask, gateway, nameservers=None): ''' Configure specified adapter to use ipv4 manual settings Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :param str address: ipv4 address :param str netmask: ipv4 netmask :param str gateway: ipv4 gateway :param str nameservers: list of nameservers servers separated by spaces (Optional) :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.set_static_all interface-label address netmask gateway nameservers ''' validate, msg = _validate_ipv4([address, netmask, gateway]) if not validate: raise salt.exceptions.CommandExecutionError(msg) if nameservers: validate, msg = _space_delimited_list(nameservers) if not validate: raise salt.exceptions.CommandExecutionError(msg) if not isinstance(nameservers, list): nameservers = nameservers.split(' ') if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '0') _save_config(interface, 'IP_Address', address) _save_config(interface, 'Subnet_Mask', netmask) _save_config(interface, 'Gateway', gateway) if nameservers: _save_config(interface, 'DNS_Address', nameservers[0]) if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: if interface in pyiface.getIfaces(): return _configure_static_interface(interface, **{'ip': address, 'dns': ','.join(nameservers) if nameservers else '', 'netmask': netmask, 'gateway': gateway}) raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('manual', variant_level=1) ipv4['Address'] = dbus.String('{0}'.format(address), variant_level=1) ipv4['Netmask'] = dbus.String('{0}'.format(netmask), variant_level=1) ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) if nameservers: service.set_property('Nameservers.Configuration', [dbus.String('{0}'.format(d)) for d in nameservers]) except Exception as exc: exc_msg = 'Couldn\'t set manual settings for service: {0}\nError: 
{1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def get_interface(iface): ''' Returns details about given interface. CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0 ''' _interfaces = get_interfaces_details() for _interface in _interfaces['interfaces']: if _interface['connectionid'] == iface: return _dict_to_string(_interface) return None def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if iface_type != 'eth': raise salt.exceptions.CommandExecutionError('Interface type not supported: {0}:'.format(iface_type)) if 'proto' not in settings or settings['proto'] == 'dhcp': # default protocol type used is dhcp set_dhcp_linklocal_all(iface) elif settings['proto'] != 'static': exc_msg = 'Protocol type: {0} is not supported'.format(settings['proto']) raise salt.exceptions.CommandExecutionError(exc_msg) else: address = settings['ipaddr'] netmask = settings['netmask'] gateway = settings['gateway'] dns = [] for key, val in six.iteritems(settings): if 'dns' in key or 'domain' in key: dns += val set_static_all(iface, address, netmask, gateway, dns) if enabled: up(iface) return get_interface(iface) def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') changes = [] if 'networking' in settings: if settings['networking'] in _CONFIG_TRUE: __salt__['service.enable']('connman') else: __salt__['service.disable']('connman') if 'hostname' in settings: new_hostname = settings['hostname'].split('.', 1)[0] settings['hostname'] = new_hostname old_hostname = __salt__['network.get_hostname'] if new_hostname != old_hostname: __salt__['network.mod_hostname'](new_hostname) changes.append('hostname={0}'.format(new_hostname)) return changes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') settings = [] networking = 'no' if _get_state() == 'offline' else 'yes' settings.append('networking={0}'.format(networking)) hostname = __salt__['network.get_hostname'] settings.append('hostname={0}'.format(hostname)) return settings def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' 
) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: stop = __salt__['service.stop']('connman') time.sleep(2) res = stop and __salt__['service.start']('connman') return hostname_res and res
saltstack/salt
salt/modules/nilrt_ip.py
_change_state
python
def _change_state(interface, new_state): ''' Enable or disable an interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' if __grains__['lsb_distrib_id'] == 'nilrt': return _change_state_legacy(interface, new_state) service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) connected = _connected(service) if (not connected and new_state == 'up') or (connected and new_state == 'down'): service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) try: state = service.connect() if new_state == 'up' else service.disconnect() return state is None except Exception: raise salt.exceptions.CommandExecutionError('Couldn\'t {0} service: {1}\n' .format('enable' if new_state == 'up' else 'disable', service)) return True
Enable or disable an interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nilrt_ip.py#L539-L564
[ "def _connected(service):\n '''\n Verify if a connman service is connected\n '''\n state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State')\n return state == 'online' or state == 'ready'\n", "def _interface_to_service(iface):\n '''\n returns the coresponding service to given interface if exists, otherwise return None\n '''\n for _service in _get_services():\n service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service))\n if service_info.get_property('Ethernet')['Interface'] == iface:\n return _service\n return None\n", "def _change_state_legacy(interface, new_state):\n '''\n Enable or disable an interface on a legacy distro\n\n Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot.\n\n :param interface: interface label\n :param new_state: up or down\n :return: True if the service was enabled, otherwise an exception will be thrown.\n :rtype: bool\n '''\n initial_mode = _get_adapter_mode_info(interface)\n _save_config(interface, 'Mode', 'TCPIP' if new_state == 'up' else 'Disabled')\n if initial_mode == 'ethercat':\n __salt__['system.set_reboot_required_witnessed']()\n else:\n out = __salt__['cmd.run_all']('ip link set {0} {1}'.format(interface, new_state))\n if out['retcode'] != 0:\n msg = 'Couldn\\'t {0} interface {1}. Error: {2}'.format('enable' if new_state == 'up' else 'disable',\n interface, out['stderr'])\n raise salt.exceptions.CommandExecutionError(msg)\n return True\n" ]
# -*- coding: utf-8 -*- ''' The networking module for NI Linux Real-Time distro ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time import os import re # Import salt libs import salt.exceptions import salt.utils.files import salt.utils.validate.net # Import 3rd-party libs # pylint: disable=import-error,redefined-builtin,no-name-in-module from salt.ext.six.moves import map, range, configparser from salt.ext import six # pylint: enable=import-error,redefined-builtin,no-name-in-module try: import pyconnman except ImportError: pyconnman = None try: import dbus except ImportError: dbus = None try: import pyiface from pyiface.ifreqioctls import IFF_LOOPBACK, IFF_RUNNING except ImportError: pyiface = None try: from requests.structures import CaseInsensitiveDict except ImportError: CaseInsensitiveDict = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'ip' SERVICE_PATH = '/net/connman/service/' INTERFACES_CONFIG = '/var/lib/connman/interfaces.config' NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg' INI_FILE = '/etc/natinst/share/ni-rt.ini' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] NIRTCFG_ETHERCAT = 'EtherCAT' def _assume_condition(condition, err): ''' Raise an exception if the condition is false ''' if not condition: raise RuntimeError(err) def __virtual__(): ''' Confine this module to NI Linux Real-Time based distros ''' try: msg = 'The nilrt_ip module could not be loaded: unsupported OS family' _assume_condition(__grains__['os_family'] == 'NILinuxRT', msg) _assume_condition(CaseInsensitiveDict, 'The python package request is not installed') _assume_condition(pyiface, 'The python pyiface package is not installed') if __grains__['lsb_distrib_id'] != 'nilrt': _assume_condition(pyconnman, 'The python package pyconnman is not installed') _assume_condition(dbus, 'The python DBus package is not installed') _assume_condition(_get_state() != 'offline', 
'Connman is not running') except RuntimeError as exc: return False, str(exc) return __virtualname__ def _get_state(): ''' Returns the state of connman ''' try: return pyconnman.ConnManager().get_property('State') except KeyError: return 'offline' except dbus.DBusException as exc: raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc)) def _get_technologies(): ''' Returns the technologies of connman ''' tech = '' technologies = pyconnman.ConnManager().get_technologies() for path, params in technologies: tech += '{0}\n\tName = {1}\n\tType = {2}\n\tPowered = {3}\n\tConnected = {4}\n'.format( path, params['Name'], params['Type'], params['Powered'] == 1, params['Connected'] == 1) return tech def _get_services(): ''' Returns a list with all connman services ''' serv = [] services = pyconnman.ConnManager().get_services() for path, _ in services: serv.append(six.text_type(path[len(SERVICE_PATH):])) return serv def _connected(service): ''' Verify if a connman service is connected ''' state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State') return state == 'online' or state == 'ready' def _space_delimited_list(value): ''' validate that a value contains one or more space-delimited values ''' if isinstance(value, six.string_types): items = value.split(' ') valid = items and all(items) else: valid = hasattr(value, '__iter__') and (value != []) if valid: return True, 'space-delimited string' return False, '{0} is not a valid list.\n'.format(value) def _validate_ipv4(value): ''' validate ipv4 values ''' if len(value) == 3: if not salt.utils.validate.net.ipv4_addr(value[0].strip()): return False, 'Invalid ip address: {0} for ipv4 option'.format(value[0]) if not salt.utils.validate.net.netmask(value[1].strip()): return False, 'Invalid netmask: {0} for ipv4 option'.format(value[1]) if not salt.utils.validate.net.ipv4_addr(value[2].strip()): return False, 'Invalid gateway: {0} for ipv4 option'.format(value[2]) else: return 
False, 'Invalid value: {0} for ipv4 option'.format(value) return True, '' def _interface_to_service(iface): ''' returns the coresponding service to given interface if exists, otherwise return None ''' for _service in _get_services(): service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service)) if service_info.get_property('Ethernet')['Interface'] == iface: return _service return None def _get_service_info(service): ''' return details about given connman service ''' service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) data = { 'label': service, 'wireless': service_info.get_property('Type') == 'wifi', 'connectionid': six.text_type(service_info.get_property('Ethernet')['Interface']), 'hwaddr': six.text_type(service_info.get_property('Ethernet')['Address']) } state = service_info.get_property('State') if state == 'ready' or state == 'online': data['up'] = True data['ipv4'] = { 'gateway': '0.0.0.0' } ipv4 = 'IPv4' if service_info.get_property('IPv4')['Method'] == 'manual': ipv4 += '.Configuration' ipv4_info = service_info.get_property(ipv4) for info in ['Method', 'Address', 'Netmask', 'Gateway']: value = ipv4_info.get(info) if value is None: log.warning('Unable to get IPv4 %s for service %s\n', info, service) continue if info == 'Method': info = 'requestmode' if value == 'dhcp': value = 'dhcp_linklocal' elif value in ('manual', 'fixed'): value = 'static' data['ipv4'][info.lower()] = six.text_type(value) ipv6_info = service_info.get_property('IPv6') for info in ['Address', 'Prefix', 'Gateway']: value = ipv6_info.get(info) if value is None: log.warning('Unable to get IPv6 %s for service %s\n', info, service) continue if 'ipv6' not in data: data['ipv6'] = {} data['ipv6'][info.lower()] = [six.text_type(value)] nameservers = [] for nameserver_prop in service_info.get_property('Nameservers'): nameservers.append(six.text_type(nameserver_prop)) data['ipv4']['dns'] = nameservers else: data['up'] = False data['ipv4'] = { 'requestmode': 
'disabled' } data['ipv4']['supportedrequestmodes'] = [ 'static', 'dhcp_linklocal', 'disabled' ] return data def _get_dns_info(): ''' return dns list ''' dns_list = [] try: with salt.utils.files.fopen('/etc/resolv.conf', 'r+') as dns_info: lines = dns_info.readlines() for line in lines: if 'nameserver' in line: dns = line.split()[1].strip() if dns not in dns_list: dns_list.append(dns) except IOError: log.warning('Could not get domain\n') return dns_list def _remove_quotes(value): ''' Remove leading and trailing double quotes if they exist. ''' # nirtcfg writes values with quotes if len(value) > 1 and value[0] == value[-1] == '\"': value = value[1:-1] return value def _load_config(section, options, default_value='', filename=INI_FILE): ''' Get values for some options and a given section from a config file. :param section: Section Name :param options: List of options :param default_value: Default value if an option doesn't have a value. Default is empty string. :param filename: config file. Default is INI_FILE. 
:return: ''' results = {} if not options: return results with salt.utils.files.fopen(filename, 'r') as config_file: config_parser = configparser.RawConfigParser(dict_type=CaseInsensitiveDict) config_parser.readfp(config_file) for option in options: results[option] = _remove_quotes(config_parser.get(section, option)) \ if config_parser.has_option(section, option) else default_value return results def _get_request_mode_info(interface): ''' return requestmode for given interface ''' settings = _load_config(interface, ['linklocalenabled', 'dhcpenabled'], -1) link_local_enabled = int(settings['linklocalenabled']) dhcp_enabled = int(settings['dhcpenabled']) if dhcp_enabled == 1: return 'dhcp_linklocal' if link_local_enabled == 1 else 'dhcp_only' else: if link_local_enabled == 1: return 'linklocal_only' if link_local_enabled == 0: return 'static' # some versions of nirtcfg don't set the dhcpenabled/linklocalenabled variables # when selecting "DHCP or Link Local" from MAX, so return it by default to avoid # having the requestmode "None" because none of the conditions above matched. return 'dhcp_linklocal' def _get_adapter_mode_info(interface): ''' return adaptermode for given interface ''' mode = _load_config(interface, ['mode'])['mode'].lower() return mode if mode in ['disabled', 'ethercat'] else 'tcpip' def _get_possible_adapter_modes(interface, blacklist): ''' Return possible adapter modes for a given interface using a blacklist. 
:param interface: interface name :param blacklist: given blacklist :return: list of possible adapter modes ''' adapter_modes = [] protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower() sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface)) with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file: uevent_lines = uevent_file.readlines() uevent_devtype = "" for line in uevent_lines: if line.startswith("DEVTYPE="): uevent_devtype = line.split('=')[1].strip() break for adapter_mode in blacklist: if adapter_mode == '_': continue value = blacklist.get(adapter_mode, {}) if value.get('additional_protocol') and adapter_mode not in protocols: continue if interface not in value['name'] \ and not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path) or (blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype) for iface_type in value['type']): adapter_modes += [adapter_mode] return adapter_modes def _get_static_info(interface): ''' Return information about an interface from config file. 
:param interface: interface label ''' data = { 'connectionid': interface.name, 'label': interface.name, 'hwaddr': interface.hwaddr[:-1], 'up': False, 'ipv4': { 'supportedrequestmodes': ['static', 'dhcp_linklocal', 'disabled'], 'requestmode': 'static' }, 'wireless': False } hwaddr_section_number = ''.join(data['hwaddr'].split(':')) if os.path.exists(INTERFACES_CONFIG): information = _load_config(hwaddr_section_number, ['IPv4', 'Nameservers'], filename=INTERFACES_CONFIG) if information['IPv4'] != '': ipv4_information = information['IPv4'].split('/') data['ipv4']['address'] = ipv4_information[0] data['ipv4']['dns'] = information['Nameservers'].split(',') data['ipv4']['netmask'] = ipv4_information[1] data['ipv4']['gateway'] = ipv4_information[2] return data def _get_base_interface_info(interface): ''' return base details about given interface ''' blacklist = { 'tcpip': { 'name': [], 'type': [], 'additional_protocol': False }, 'disabled': { 'name': ['eth0'], 'type': ['gadget'], 'additional_protocol': False }, 'ethercat': { 'name': ['eth0'], 'type': ['gadget', 'usb', 'wlan'], 'additional_protocol': True }, '_': { 'usb': 'sys', 'gadget': 'uevent', 'wlan': 'uevent' } } return { 'label': interface.name, 'connectionid': interface.name, 'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist), 'adapter_mode': _get_adapter_mode_info(interface.name), 'up': interface.flags & IFF_RUNNING != 0, 'ipv4': { 'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'], 'requestmode': _get_request_mode_info(interface.name) }, 'hwaddr': interface.hwaddr[:-1] } def _get_ethercat_interface_info(interface): ''' return details about given ethercat interface ''' base_information = _get_base_interface_info(interface) base_information['ethercat'] = { 'masterid': _load_config(interface.name, ['MasterID'])['MasterID'] } return base_information def _get_tcpip_interface_info(interface): ''' return details about given tcpip interface ''' 
base_information = _get_base_interface_info(interface) if base_information['ipv4']['requestmode'] == 'static': settings = _load_config(interface.name, ['IP_Address', 'Subnet_Mask', 'Gateway', 'DNS_Address']) base_information['ipv4']['address'] = settings['IP_Address'] base_information['ipv4']['netmask'] = settings['Subnet_Mask'] base_information['ipv4']['gateway'] = settings['Gateway'] base_information['ipv4']['dns'] = [settings['DNS_Address']] elif base_information['up']: base_information['ipv4']['address'] = interface.sockaddrToStr(interface.addr) base_information['ipv4']['netmask'] = interface.sockaddrToStr(interface.netmask) base_information['ipv4']['gateway'] = '0.0.0.0' base_information['ipv4']['dns'] = _get_dns_info() with salt.utils.files.fopen('/proc/net/route', 'r') as route_file: pattern = re.compile(r'^{interface}\t[0]{{8}}\t([0-9A-Z]{{8}})'.format(interface=interface.name), re.MULTILINE) match = pattern.search(route_file.read()) iface_gateway_hex = None if not match else match.group(1) if iface_gateway_hex is not None and len(iface_gateway_hex) == 8: base_information['ipv4']['gateway'] = '.'.join([str(int(iface_gateway_hex[i:i + 2], 16)) for i in range(6, -1, -2)]) return base_information def _get_interface_info(interface): ''' return details about given interface ''' adapter_mode = _get_adapter_mode_info(interface.name) if adapter_mode == 'disabled': return _get_base_interface_info(interface) elif adapter_mode == 'ethercat': return _get_ethercat_interface_info(interface) return _get_tcpip_interface_info(interface) def _dict_to_string(dictionary): ''' converts a dictionary object into a list of strings ''' ret = '' for key, val in sorted(dictionary.items()): if isinstance(val, dict): for line in _dict_to_string(val): ret += six.text_type(key) + '-' + line + '\n' elif isinstance(val, list): text = ' '.join([six.text_type(item) for item in val]) ret += six.text_type(key) + ': ' + text + '\n' else: ret += six.text_type(key) + ': ' + six.text_type(val) + 
'\n' return ret.splitlines() def _get_info(interface): ''' Return information about an interface if it's associated with a service. :param interface: interface label ''' service = _interface_to_service(interface.name) return _get_service_info(service) def get_interfaces_details(): ''' Get details about all the interfaces on the minion :return: information about all interfaces omitting loopback :rtype: dictionary CLI Example: .. code-block:: bash salt '*' ip.get_interfaces_details ''' _interfaces = [interface for interface in pyiface.getIfaces() if interface.flags & IFF_LOOPBACK == 0] if __grains__['lsb_distrib_id'] == 'nilrt': return {'interfaces': list(map(_get_interface_info, _interfaces))} # filter just the services _interfaces = [interface for interface in _interfaces if _interface_to_service(interface.name) is not None] return {'interfaces': list(map(_get_info, _interfaces))} def _change_state_legacy(interface, new_state): ''' Enable or disable an interface on a legacy distro Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP' if new_state == 'up' else 'Disabled') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: out = __salt__['cmd.run_all']('ip link set {0} {1}'.format(interface, new_state)) if out['retcode'] != 0: msg = 'Couldn\'t {0} interface {1}. Error: {2}'.format('enable' if new_state == 'up' else 'disable', interface, out['stderr']) raise salt.exceptions.CommandExecutionError(msg) return True def up(interface, iface_type=None): # pylint: disable=invalid-name,unused-argument ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. 
:param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.up interface-label ''' return _change_state(interface, 'up') def enable(interface): ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.enable interface-label ''' return up(interface) def down(interface, iface_type=None): # pylint: disable=unused-argument ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.down interface-label ''' return _change_state(interface, 'down') def disable(interface): ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.disable interface-label ''' return down(interface) def _save_config(section, token, value): ''' Helper function to persist a configuration in the ini file ''' cmd = NIRTCFG_PATH cmd += ' --set section={0},token=\'{1}\',value=\'{2}\''.format(section, token, value) if __salt__['cmd.run_all'](cmd)['retcode'] != 0: exc_msg = 'Error: could not set {} to {} for {}\n'.format(token, value, section) raise salt.exceptions.CommandExecutionError(exc_msg) def set_ethercat(interface, master_id): ''' Configure specified adapter to use EtherCAT adapter mode. 
If successful, the target will need reboot if it doesn't already use EtherCAT adapter mode, otherwise will return true. :param interface: interface label :param master_id: EtherCAT Master ID :return: True if the settings were applied, otherwise an exception will be thrown. CLI Example: .. code-block:: bash salt '*' ip.set_ethercat interface-label master-id ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', NIRTCFG_ETHERCAT) _save_config(interface, 'MasterID', master_id) if initial_mode != 'ethercat': __salt__['system.set_reboot_required_witnessed']() return True raise salt.exceptions.CommandExecutionError('EtherCAT is not supported') def _restart(interface): ''' Disable and enable an interface ''' disable(interface) enable(interface) def set_dhcp_linklocal_all(interface): ''' Configure specified adapter to use DHCP with linklocal fallback Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.set_dhcp_linklocal_all interface-label ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('dhcp', variant_level=1) ipv4['Address'] = dbus.String('', variant_level=1) ipv4['Netmask'] = dbus.String('', variant_level=1) ipv4['Gateway'] = dbus.String('', variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) service.set_property('Nameservers.Configuration', ['']) # reset nameservers list except Exception as exc: exc_msg = 'Couldn\'t set dhcp linklocal for service: {0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def set_dhcp_only_all(interface): ''' Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.dhcp_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '0') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def set_linklocal_only_all(interface): ''' Configure specified adapter to use linklocal only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.linklocal_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def _configure_static_interface(interface, **settings): ''' Configure an interface that is not detected as a service by Connman (i.e. link is down) :param interface: interface label :param settings: - ip - netmask - gateway - dns - name :return: True if settings were applied successfully. 
:rtype: bool ''' interface = pyiface.Interface(name=interface) parser = configparser.ConfigParser() if os.path.exists(INTERFACES_CONFIG): try: with salt.utils.files.fopen(INTERFACES_CONFIG, 'r') as config_file: parser.readfp(config_file) except configparser.MissingSectionHeaderError: pass hwaddr = interface.hwaddr[:-1] hwaddr_section_number = ''.join(hwaddr.split(':')) if not parser.has_section('interface_{0}'.format(hwaddr_section_number)): parser.add_section('interface_{0}'.format(hwaddr_section_number)) ip_address = settings.get('ip', '0.0.0.0') netmask = settings.get('netmask', '0.0.0.0') gateway = settings.get('gateway', '0.0.0.0') dns_servers = settings.get('dns', '') name = settings.get('name', 'ethernet_cable_{0}'.format(hwaddr_section_number)) parser.set('interface_{0}'.format(hwaddr_section_number), 'IPv4', '{0}/{1}/{2}'. format(ip_address, netmask, gateway)) parser.set('interface_{0}'.format(hwaddr_section_number), 'Nameservers', dns_servers) parser.set('interface_{0}'.format(hwaddr_section_number), 'Name', name) parser.set('interface_{0}'.format(hwaddr_section_number), 'MAC', hwaddr) parser.set('interface_{0}'.format(hwaddr_section_number), 'Type', 'ethernet') with salt.utils.files.fopen(INTERFACES_CONFIG, 'w') as config_file: parser.write(config_file) return True def set_static_all(interface, address, netmask, gateway, nameservers=None): ''' Configure specified adapter to use ipv4 manual settings Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :param str address: ipv4 address :param str netmask: ipv4 netmask :param str gateway: ipv4 gateway :param str nameservers: list of nameservers servers separated by spaces (Optional) :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.set_static_all interface-label address netmask gateway nameservers ''' validate, msg = _validate_ipv4([address, netmask, gateway]) if not validate: raise salt.exceptions.CommandExecutionError(msg) if nameservers: validate, msg = _space_delimited_list(nameservers) if not validate: raise salt.exceptions.CommandExecutionError(msg) if not isinstance(nameservers, list): nameservers = nameservers.split(' ') if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '0') _save_config(interface, 'IP_Address', address) _save_config(interface, 'Subnet_Mask', netmask) _save_config(interface, 'Gateway', gateway) if nameservers: _save_config(interface, 'DNS_Address', nameservers[0]) if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: if interface in pyiface.getIfaces(): return _configure_static_interface(interface, **{'ip': address, 'dns': ','.join(nameservers) if nameservers else '', 'netmask': netmask, 'gateway': gateway}) raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('manual', variant_level=1) ipv4['Address'] = dbus.String('{0}'.format(address), variant_level=1) ipv4['Netmask'] = dbus.String('{0}'.format(netmask), variant_level=1) ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) if nameservers: service.set_property('Nameservers.Configuration', [dbus.String('{0}'.format(d)) for d in nameservers]) except Exception as exc: exc_msg = 'Couldn\'t set manual settings for service: {0}\nError: 
{1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def get_interface(iface): ''' Returns details about given interface. CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0 ''' _interfaces = get_interfaces_details() for _interface in _interfaces['interfaces']: if _interface['connectionid'] == iface: return _dict_to_string(_interface) return None def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if iface_type != 'eth': raise salt.exceptions.CommandExecutionError('Interface type not supported: {0}:'.format(iface_type)) if 'proto' not in settings or settings['proto'] == 'dhcp': # default protocol type used is dhcp set_dhcp_linklocal_all(iface) elif settings['proto'] != 'static': exc_msg = 'Protocol type: {0} is not supported'.format(settings['proto']) raise salt.exceptions.CommandExecutionError(exc_msg) else: address = settings['ipaddr'] netmask = settings['netmask'] gateway = settings['gateway'] dns = [] for key, val in six.iteritems(settings): if 'dns' in key or 'domain' in key: dns += val set_static_all(iface, address, netmask, gateway, dns) if enabled: up(iface) return get_interface(iface) def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') changes = [] if 'networking' in settings: if settings['networking'] in _CONFIG_TRUE: __salt__['service.enable']('connman') else: __salt__['service.disable']('connman') if 'hostname' in settings: new_hostname = settings['hostname'].split('.', 1)[0] settings['hostname'] = new_hostname old_hostname = __salt__['network.get_hostname'] if new_hostname != old_hostname: __salt__['network.mod_hostname'](new_hostname) changes.append('hostname={0}'.format(new_hostname)) return changes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') settings = [] networking = 'no' if _get_state() == 'offline' else 'yes' settings.append('networking={0}'.format(networking)) hostname = __salt__['network.get_hostname'] settings.append('hostname={0}'.format(hostname)) return settings def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' 
) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: stop = __salt__['service.stop']('connman') time.sleep(2) res = stop and __salt__['service.start']('connman') return hostname_res and res
saltstack/salt
salt/modules/nilrt_ip.py
_save_config
python
def _save_config(section, token, value): ''' Helper function to persist a configuration in the ini file ''' cmd = NIRTCFG_PATH cmd += ' --set section={0},token=\'{1}\',value=\'{2}\''.format(section, token, value) if __salt__['cmd.run_all'](cmd)['retcode'] != 0: exc_msg = 'Error: could not set {} to {} for {}\n'.format(token, value, section) raise salt.exceptions.CommandExecutionError(exc_msg)
Helper function to persist a configuration in the ini file
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nilrt_ip.py#L643-L651
null
# -*- coding: utf-8 -*- ''' The networking module for NI Linux Real-Time distro ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time import os import re # Import salt libs import salt.exceptions import salt.utils.files import salt.utils.validate.net # Import 3rd-party libs # pylint: disable=import-error,redefined-builtin,no-name-in-module from salt.ext.six.moves import map, range, configparser from salt.ext import six # pylint: enable=import-error,redefined-builtin,no-name-in-module try: import pyconnman except ImportError: pyconnman = None try: import dbus except ImportError: dbus = None try: import pyiface from pyiface.ifreqioctls import IFF_LOOPBACK, IFF_RUNNING except ImportError: pyiface = None try: from requests.structures import CaseInsensitiveDict except ImportError: CaseInsensitiveDict = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'ip' SERVICE_PATH = '/net/connman/service/' INTERFACES_CONFIG = '/var/lib/connman/interfaces.config' NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg' INI_FILE = '/etc/natinst/share/ni-rt.ini' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] NIRTCFG_ETHERCAT = 'EtherCAT' def _assume_condition(condition, err): ''' Raise an exception if the condition is false ''' if not condition: raise RuntimeError(err) def __virtual__(): ''' Confine this module to NI Linux Real-Time based distros ''' try: msg = 'The nilrt_ip module could not be loaded: unsupported OS family' _assume_condition(__grains__['os_family'] == 'NILinuxRT', msg) _assume_condition(CaseInsensitiveDict, 'The python package request is not installed') _assume_condition(pyiface, 'The python pyiface package is not installed') if __grains__['lsb_distrib_id'] != 'nilrt': _assume_condition(pyconnman, 'The python package pyconnman is not installed') _assume_condition(dbus, 'The python DBus package is not installed') _assume_condition(_get_state() != 'offline', 
'Connman is not running') except RuntimeError as exc: return False, str(exc) return __virtualname__ def _get_state(): ''' Returns the state of connman ''' try: return pyconnman.ConnManager().get_property('State') except KeyError: return 'offline' except dbus.DBusException as exc: raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc)) def _get_technologies(): ''' Returns the technologies of connman ''' tech = '' technologies = pyconnman.ConnManager().get_technologies() for path, params in technologies: tech += '{0}\n\tName = {1}\n\tType = {2}\n\tPowered = {3}\n\tConnected = {4}\n'.format( path, params['Name'], params['Type'], params['Powered'] == 1, params['Connected'] == 1) return tech def _get_services(): ''' Returns a list with all connman services ''' serv = [] services = pyconnman.ConnManager().get_services() for path, _ in services: serv.append(six.text_type(path[len(SERVICE_PATH):])) return serv def _connected(service): ''' Verify if a connman service is connected ''' state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State') return state == 'online' or state == 'ready' def _space_delimited_list(value): ''' validate that a value contains one or more space-delimited values ''' if isinstance(value, six.string_types): items = value.split(' ') valid = items and all(items) else: valid = hasattr(value, '__iter__') and (value != []) if valid: return True, 'space-delimited string' return False, '{0} is not a valid list.\n'.format(value) def _validate_ipv4(value): ''' validate ipv4 values ''' if len(value) == 3: if not salt.utils.validate.net.ipv4_addr(value[0].strip()): return False, 'Invalid ip address: {0} for ipv4 option'.format(value[0]) if not salt.utils.validate.net.netmask(value[1].strip()): return False, 'Invalid netmask: {0} for ipv4 option'.format(value[1]) if not salt.utils.validate.net.ipv4_addr(value[2].strip()): return False, 'Invalid gateway: {0} for ipv4 option'.format(value[2]) else: return 
False, 'Invalid value: {0} for ipv4 option'.format(value) return True, '' def _interface_to_service(iface): ''' returns the coresponding service to given interface if exists, otherwise return None ''' for _service in _get_services(): service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service)) if service_info.get_property('Ethernet')['Interface'] == iface: return _service return None def _get_service_info(service): ''' return details about given connman service ''' service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) data = { 'label': service, 'wireless': service_info.get_property('Type') == 'wifi', 'connectionid': six.text_type(service_info.get_property('Ethernet')['Interface']), 'hwaddr': six.text_type(service_info.get_property('Ethernet')['Address']) } state = service_info.get_property('State') if state == 'ready' or state == 'online': data['up'] = True data['ipv4'] = { 'gateway': '0.0.0.0' } ipv4 = 'IPv4' if service_info.get_property('IPv4')['Method'] == 'manual': ipv4 += '.Configuration' ipv4_info = service_info.get_property(ipv4) for info in ['Method', 'Address', 'Netmask', 'Gateway']: value = ipv4_info.get(info) if value is None: log.warning('Unable to get IPv4 %s for service %s\n', info, service) continue if info == 'Method': info = 'requestmode' if value == 'dhcp': value = 'dhcp_linklocal' elif value in ('manual', 'fixed'): value = 'static' data['ipv4'][info.lower()] = six.text_type(value) ipv6_info = service_info.get_property('IPv6') for info in ['Address', 'Prefix', 'Gateway']: value = ipv6_info.get(info) if value is None: log.warning('Unable to get IPv6 %s for service %s\n', info, service) continue if 'ipv6' not in data: data['ipv6'] = {} data['ipv6'][info.lower()] = [six.text_type(value)] nameservers = [] for nameserver_prop in service_info.get_property('Nameservers'): nameservers.append(six.text_type(nameserver_prop)) data['ipv4']['dns'] = nameservers else: data['up'] = False data['ipv4'] = { 'requestmode': 
'disabled' } data['ipv4']['supportedrequestmodes'] = [ 'static', 'dhcp_linklocal', 'disabled' ] return data def _get_dns_info(): ''' return dns list ''' dns_list = [] try: with salt.utils.files.fopen('/etc/resolv.conf', 'r+') as dns_info: lines = dns_info.readlines() for line in lines: if 'nameserver' in line: dns = line.split()[1].strip() if dns not in dns_list: dns_list.append(dns) except IOError: log.warning('Could not get domain\n') return dns_list def _remove_quotes(value): ''' Remove leading and trailing double quotes if they exist. ''' # nirtcfg writes values with quotes if len(value) > 1 and value[0] == value[-1] == '\"': value = value[1:-1] return value def _load_config(section, options, default_value='', filename=INI_FILE): ''' Get values for some options and a given section from a config file. :param section: Section Name :param options: List of options :param default_value: Default value if an option doesn't have a value. Default is empty string. :param filename: config file. Default is INI_FILE. 
:return: ''' results = {} if not options: return results with salt.utils.files.fopen(filename, 'r') as config_file: config_parser = configparser.RawConfigParser(dict_type=CaseInsensitiveDict) config_parser.readfp(config_file) for option in options: results[option] = _remove_quotes(config_parser.get(section, option)) \ if config_parser.has_option(section, option) else default_value return results def _get_request_mode_info(interface): ''' return requestmode for given interface ''' settings = _load_config(interface, ['linklocalenabled', 'dhcpenabled'], -1) link_local_enabled = int(settings['linklocalenabled']) dhcp_enabled = int(settings['dhcpenabled']) if dhcp_enabled == 1: return 'dhcp_linklocal' if link_local_enabled == 1 else 'dhcp_only' else: if link_local_enabled == 1: return 'linklocal_only' if link_local_enabled == 0: return 'static' # some versions of nirtcfg don't set the dhcpenabled/linklocalenabled variables # when selecting "DHCP or Link Local" from MAX, so return it by default to avoid # having the requestmode "None" because none of the conditions above matched. return 'dhcp_linklocal' def _get_adapter_mode_info(interface): ''' return adaptermode for given interface ''' mode = _load_config(interface, ['mode'])['mode'].lower() return mode if mode in ['disabled', 'ethercat'] else 'tcpip' def _get_possible_adapter_modes(interface, blacklist): ''' Return possible adapter modes for a given interface using a blacklist. 
:param interface: interface name :param blacklist: given blacklist :return: list of possible adapter modes ''' adapter_modes = [] protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower() sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface)) with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file: uevent_lines = uevent_file.readlines() uevent_devtype = "" for line in uevent_lines: if line.startswith("DEVTYPE="): uevent_devtype = line.split('=')[1].strip() break for adapter_mode in blacklist: if adapter_mode == '_': continue value = blacklist.get(adapter_mode, {}) if value.get('additional_protocol') and adapter_mode not in protocols: continue if interface not in value['name'] \ and not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path) or (blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype) for iface_type in value['type']): adapter_modes += [adapter_mode] return adapter_modes def _get_static_info(interface): ''' Return information about an interface from config file. 
:param interface: interface label ''' data = { 'connectionid': interface.name, 'label': interface.name, 'hwaddr': interface.hwaddr[:-1], 'up': False, 'ipv4': { 'supportedrequestmodes': ['static', 'dhcp_linklocal', 'disabled'], 'requestmode': 'static' }, 'wireless': False } hwaddr_section_number = ''.join(data['hwaddr'].split(':')) if os.path.exists(INTERFACES_CONFIG): information = _load_config(hwaddr_section_number, ['IPv4', 'Nameservers'], filename=INTERFACES_CONFIG) if information['IPv4'] != '': ipv4_information = information['IPv4'].split('/') data['ipv4']['address'] = ipv4_information[0] data['ipv4']['dns'] = information['Nameservers'].split(',') data['ipv4']['netmask'] = ipv4_information[1] data['ipv4']['gateway'] = ipv4_information[2] return data def _get_base_interface_info(interface): ''' return base details about given interface ''' blacklist = { 'tcpip': { 'name': [], 'type': [], 'additional_protocol': False }, 'disabled': { 'name': ['eth0'], 'type': ['gadget'], 'additional_protocol': False }, 'ethercat': { 'name': ['eth0'], 'type': ['gadget', 'usb', 'wlan'], 'additional_protocol': True }, '_': { 'usb': 'sys', 'gadget': 'uevent', 'wlan': 'uevent' } } return { 'label': interface.name, 'connectionid': interface.name, 'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist), 'adapter_mode': _get_adapter_mode_info(interface.name), 'up': interface.flags & IFF_RUNNING != 0, 'ipv4': { 'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'], 'requestmode': _get_request_mode_info(interface.name) }, 'hwaddr': interface.hwaddr[:-1] } def _get_ethercat_interface_info(interface): ''' return details about given ethercat interface ''' base_information = _get_base_interface_info(interface) base_information['ethercat'] = { 'masterid': _load_config(interface.name, ['MasterID'])['MasterID'] } return base_information def _get_tcpip_interface_info(interface): ''' return details about given tcpip interface ''' 
base_information = _get_base_interface_info(interface) if base_information['ipv4']['requestmode'] == 'static': settings = _load_config(interface.name, ['IP_Address', 'Subnet_Mask', 'Gateway', 'DNS_Address']) base_information['ipv4']['address'] = settings['IP_Address'] base_information['ipv4']['netmask'] = settings['Subnet_Mask'] base_information['ipv4']['gateway'] = settings['Gateway'] base_information['ipv4']['dns'] = [settings['DNS_Address']] elif base_information['up']: base_information['ipv4']['address'] = interface.sockaddrToStr(interface.addr) base_information['ipv4']['netmask'] = interface.sockaddrToStr(interface.netmask) base_information['ipv4']['gateway'] = '0.0.0.0' base_information['ipv4']['dns'] = _get_dns_info() with salt.utils.files.fopen('/proc/net/route', 'r') as route_file: pattern = re.compile(r'^{interface}\t[0]{{8}}\t([0-9A-Z]{{8}})'.format(interface=interface.name), re.MULTILINE) match = pattern.search(route_file.read()) iface_gateway_hex = None if not match else match.group(1) if iface_gateway_hex is not None and len(iface_gateway_hex) == 8: base_information['ipv4']['gateway'] = '.'.join([str(int(iface_gateway_hex[i:i + 2], 16)) for i in range(6, -1, -2)]) return base_information def _get_interface_info(interface): ''' return details about given interface ''' adapter_mode = _get_adapter_mode_info(interface.name) if adapter_mode == 'disabled': return _get_base_interface_info(interface) elif adapter_mode == 'ethercat': return _get_ethercat_interface_info(interface) return _get_tcpip_interface_info(interface) def _dict_to_string(dictionary): ''' converts a dictionary object into a list of strings ''' ret = '' for key, val in sorted(dictionary.items()): if isinstance(val, dict): for line in _dict_to_string(val): ret += six.text_type(key) + '-' + line + '\n' elif isinstance(val, list): text = ' '.join([six.text_type(item) for item in val]) ret += six.text_type(key) + ': ' + text + '\n' else: ret += six.text_type(key) + ': ' + six.text_type(val) + 
'\n' return ret.splitlines() def _get_info(interface): ''' Return information about an interface if it's associated with a service. :param interface: interface label ''' service = _interface_to_service(interface.name) return _get_service_info(service) def get_interfaces_details(): ''' Get details about all the interfaces on the minion :return: information about all interfaces omitting loopback :rtype: dictionary CLI Example: .. code-block:: bash salt '*' ip.get_interfaces_details ''' _interfaces = [interface for interface in pyiface.getIfaces() if interface.flags & IFF_LOOPBACK == 0] if __grains__['lsb_distrib_id'] == 'nilrt': return {'interfaces': list(map(_get_interface_info, _interfaces))} # filter just the services _interfaces = [interface for interface in _interfaces if _interface_to_service(interface.name) is not None] return {'interfaces': list(map(_get_info, _interfaces))} def _change_state_legacy(interface, new_state): ''' Enable or disable an interface on a legacy distro Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP' if new_state == 'up' else 'Disabled') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: out = __salt__['cmd.run_all']('ip link set {0} {1}'.format(interface, new_state)) if out['retcode'] != 0: msg = 'Couldn\'t {0} interface {1}. Error: {2}'.format('enable' if new_state == 'up' else 'disable', interface, out['stderr']) raise salt.exceptions.CommandExecutionError(msg) return True def _change_state(interface, new_state): ''' Enable or disable an interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. 
:param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' if __grains__['lsb_distrib_id'] == 'nilrt': return _change_state_legacy(interface, new_state) service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) connected = _connected(service) if (not connected and new_state == 'up') or (connected and new_state == 'down'): service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) try: state = service.connect() if new_state == 'up' else service.disconnect() return state is None except Exception: raise salt.exceptions.CommandExecutionError('Couldn\'t {0} service: {1}\n' .format('enable' if new_state == 'up' else 'disable', service)) return True def up(interface, iface_type=None): # pylint: disable=invalid-name,unused-argument ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.up interface-label ''' return _change_state(interface, 'up') def enable(interface): ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.enable interface-label ''' return up(interface) def down(interface, iface_type=None): # pylint: disable=unused-argument ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. 
:param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.down interface-label ''' return _change_state(interface, 'down') def disable(interface): ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.disable interface-label ''' return down(interface) def set_ethercat(interface, master_id): ''' Configure specified adapter to use EtherCAT adapter mode. If successful, the target will need reboot if it doesn't already use EtherCAT adapter mode, otherwise will return true. :param interface: interface label :param master_id: EtherCAT Master ID :return: True if the settings were applied, otherwise an exception will be thrown. CLI Example: .. code-block:: bash salt '*' ip.set_ethercat interface-label master-id ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', NIRTCFG_ETHERCAT) _save_config(interface, 'MasterID', master_id) if initial_mode != 'ethercat': __salt__['system.set_reboot_required_witnessed']() return True raise salt.exceptions.CommandExecutionError('EtherCAT is not supported') def _restart(interface): ''' Disable and enable an interface ''' disable(interface) enable(interface) def set_dhcp_linklocal_all(interface): ''' Configure specified adapter to use DHCP with linklocal fallback Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.set_dhcp_linklocal_all interface-label ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('dhcp', variant_level=1) ipv4['Address'] = dbus.String('', variant_level=1) ipv4['Netmask'] = dbus.String('', variant_level=1) ipv4['Gateway'] = dbus.String('', variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) service.set_property('Nameservers.Configuration', ['']) # reset nameservers list except Exception as exc: exc_msg = 'Couldn\'t set dhcp linklocal for service: {0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def set_dhcp_only_all(interface): ''' Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.dhcp_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '0') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def set_linklocal_only_all(interface): ''' Configure specified adapter to use linklocal only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.linklocal_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def _configure_static_interface(interface, **settings): ''' Configure an interface that is not detected as a service by Connman (i.e. link is down) :param interface: interface label :param settings: - ip - netmask - gateway - dns - name :return: True if settings were applied successfully. 
:rtype: bool ''' interface = pyiface.Interface(name=interface) parser = configparser.ConfigParser() if os.path.exists(INTERFACES_CONFIG): try: with salt.utils.files.fopen(INTERFACES_CONFIG, 'r') as config_file: parser.readfp(config_file) except configparser.MissingSectionHeaderError: pass hwaddr = interface.hwaddr[:-1] hwaddr_section_number = ''.join(hwaddr.split(':')) if not parser.has_section('interface_{0}'.format(hwaddr_section_number)): parser.add_section('interface_{0}'.format(hwaddr_section_number)) ip_address = settings.get('ip', '0.0.0.0') netmask = settings.get('netmask', '0.0.0.0') gateway = settings.get('gateway', '0.0.0.0') dns_servers = settings.get('dns', '') name = settings.get('name', 'ethernet_cable_{0}'.format(hwaddr_section_number)) parser.set('interface_{0}'.format(hwaddr_section_number), 'IPv4', '{0}/{1}/{2}'. format(ip_address, netmask, gateway)) parser.set('interface_{0}'.format(hwaddr_section_number), 'Nameservers', dns_servers) parser.set('interface_{0}'.format(hwaddr_section_number), 'Name', name) parser.set('interface_{0}'.format(hwaddr_section_number), 'MAC', hwaddr) parser.set('interface_{0}'.format(hwaddr_section_number), 'Type', 'ethernet') with salt.utils.files.fopen(INTERFACES_CONFIG, 'w') as config_file: parser.write(config_file) return True def set_static_all(interface, address, netmask, gateway, nameservers=None): ''' Configure specified adapter to use ipv4 manual settings Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :param str address: ipv4 address :param str netmask: ipv4 netmask :param str gateway: ipv4 gateway :param str nameservers: list of nameservers servers separated by spaces (Optional) :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.set_static_all interface-label address netmask gateway nameservers ''' validate, msg = _validate_ipv4([address, netmask, gateway]) if not validate: raise salt.exceptions.CommandExecutionError(msg) if nameservers: validate, msg = _space_delimited_list(nameservers) if not validate: raise salt.exceptions.CommandExecutionError(msg) if not isinstance(nameservers, list): nameservers = nameservers.split(' ') if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '0') _save_config(interface, 'IP_Address', address) _save_config(interface, 'Subnet_Mask', netmask) _save_config(interface, 'Gateway', gateway) if nameservers: _save_config(interface, 'DNS_Address', nameservers[0]) if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: if interface in pyiface.getIfaces(): return _configure_static_interface(interface, **{'ip': address, 'dns': ','.join(nameservers) if nameservers else '', 'netmask': netmask, 'gateway': gateway}) raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('manual', variant_level=1) ipv4['Address'] = dbus.String('{0}'.format(address), variant_level=1) ipv4['Netmask'] = dbus.String('{0}'.format(netmask), variant_level=1) ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) if nameservers: service.set_property('Nameservers.Configuration', [dbus.String('{0}'.format(d)) for d in nameservers]) except Exception as exc: exc_msg = 'Couldn\'t set manual settings for service: {0}\nError: 
{1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def get_interface(iface): ''' Returns details about given interface. CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0 ''' _interfaces = get_interfaces_details() for _interface in _interfaces['interfaces']: if _interface['connectionid'] == iface: return _dict_to_string(_interface) return None def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if iface_type != 'eth': raise salt.exceptions.CommandExecutionError('Interface type not supported: {0}:'.format(iface_type)) if 'proto' not in settings or settings['proto'] == 'dhcp': # default protocol type used is dhcp set_dhcp_linklocal_all(iface) elif settings['proto'] != 'static': exc_msg = 'Protocol type: {0} is not supported'.format(settings['proto']) raise salt.exceptions.CommandExecutionError(exc_msg) else: address = settings['ipaddr'] netmask = settings['netmask'] gateway = settings['gateway'] dns = [] for key, val in six.iteritems(settings): if 'dns' in key or 'domain' in key: dns += val set_static_all(iface, address, netmask, gateway, dns) if enabled: up(iface) return get_interface(iface) def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') changes = [] if 'networking' in settings: if settings['networking'] in _CONFIG_TRUE: __salt__['service.enable']('connman') else: __salt__['service.disable']('connman') if 'hostname' in settings: new_hostname = settings['hostname'].split('.', 1)[0] settings['hostname'] = new_hostname old_hostname = __salt__['network.get_hostname'] if new_hostname != old_hostname: __salt__['network.mod_hostname'](new_hostname) changes.append('hostname={0}'.format(new_hostname)) return changes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') settings = [] networking = 'no' if _get_state() == 'offline' else 'yes' settings.append('networking={0}'.format(networking)) hostname = __salt__['network.get_hostname'] settings.append('hostname={0}'.format(hostname)) return settings def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' 
) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: stop = __salt__['service.stop']('connman') time.sleep(2) res = stop and __salt__['service.start']('connman') return hostname_res and res
saltstack/salt
salt/modules/nilrt_ip.py
set_ethercat
python
def set_ethercat(interface, master_id):
    '''
    Configure specified adapter to use EtherCAT adapter mode.

    If the adapter was not already in EtherCAT mode, the target will need a
    reboot for the change to take effect.

    :param interface: interface label
    :param master_id: EtherCAT Master ID
    :return: True if the settings were applied, otherwise an exception
        will be thrown.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.set_ethercat interface-label master-id
    '''
    # EtherCAT adapter mode is only available on the legacy NILRT distro.
    if __grains__['lsb_distrib_id'] != 'nilrt':
        raise salt.exceptions.CommandExecutionError('EtherCAT is not supported')
    previous_mode = _get_adapter_mode_info(interface)
    # Persist both settings through nirtcfg.
    for token, value in (('Mode', NIRTCFG_ETHERCAT), ('MasterID', master_id)):
        _save_config(interface, token, value)
    if previous_mode != 'ethercat':
        # Switching into EtherCAT mode only takes effect after a reboot.
        __salt__['system.set_reboot_required_witnessed']()
    return True
Configure specified adapter to use EtherCAT adapter mode. If successful, the target will need reboot if it doesn't already use EtherCAT adapter mode, otherwise will return true. :param interface: interface label :param master_id: EtherCAT Master ID :return: True if the settings were applied, otherwise an exception will be thrown. CLI Example: .. code-block:: bash salt '*' ip.set_ethercat interface-label master-id
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nilrt_ip.py#L654-L676
[ "def _get_adapter_mode_info(interface):\n '''\n return adaptermode for given interface\n '''\n mode = _load_config(interface, ['mode'])['mode'].lower()\n return mode if mode in ['disabled', 'ethercat'] else 'tcpip'\n", "def _save_config(section, token, value):\n '''\n Helper function to persist a configuration in the ini file\n '''\n cmd = NIRTCFG_PATH\n cmd += ' --set section={0},token=\\'{1}\\',value=\\'{2}\\''.format(section, token, value)\n if __salt__['cmd.run_all'](cmd)['retcode'] != 0:\n exc_msg = 'Error: could not set {} to {} for {}\\n'.format(token, value, section)\n raise salt.exceptions.CommandExecutionError(exc_msg)\n" ]
# -*- coding: utf-8 -*- ''' The networking module for NI Linux Real-Time distro ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time import os import re # Import salt libs import salt.exceptions import salt.utils.files import salt.utils.validate.net # Import 3rd-party libs # pylint: disable=import-error,redefined-builtin,no-name-in-module from salt.ext.six.moves import map, range, configparser from salt.ext import six # pylint: enable=import-error,redefined-builtin,no-name-in-module try: import pyconnman except ImportError: pyconnman = None try: import dbus except ImportError: dbus = None try: import pyiface from pyiface.ifreqioctls import IFF_LOOPBACK, IFF_RUNNING except ImportError: pyiface = None try: from requests.structures import CaseInsensitiveDict except ImportError: CaseInsensitiveDict = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'ip' SERVICE_PATH = '/net/connman/service/' INTERFACES_CONFIG = '/var/lib/connman/interfaces.config' NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg' INI_FILE = '/etc/natinst/share/ni-rt.ini' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] NIRTCFG_ETHERCAT = 'EtherCAT' def _assume_condition(condition, err): ''' Raise an exception if the condition is false ''' if not condition: raise RuntimeError(err) def __virtual__(): ''' Confine this module to NI Linux Real-Time based distros ''' try: msg = 'The nilrt_ip module could not be loaded: unsupported OS family' _assume_condition(__grains__['os_family'] == 'NILinuxRT', msg) _assume_condition(CaseInsensitiveDict, 'The python package request is not installed') _assume_condition(pyiface, 'The python pyiface package is not installed') if __grains__['lsb_distrib_id'] != 'nilrt': _assume_condition(pyconnman, 'The python package pyconnman is not installed') _assume_condition(dbus, 'The python DBus package is not installed') _assume_condition(_get_state() != 'offline', 
'Connman is not running') except RuntimeError as exc: return False, str(exc) return __virtualname__ def _get_state(): ''' Returns the state of connman ''' try: return pyconnman.ConnManager().get_property('State') except KeyError: return 'offline' except dbus.DBusException as exc: raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc)) def _get_technologies(): ''' Returns the technologies of connman ''' tech = '' technologies = pyconnman.ConnManager().get_technologies() for path, params in technologies: tech += '{0}\n\tName = {1}\n\tType = {2}\n\tPowered = {3}\n\tConnected = {4}\n'.format( path, params['Name'], params['Type'], params['Powered'] == 1, params['Connected'] == 1) return tech def _get_services(): ''' Returns a list with all connman services ''' serv = [] services = pyconnman.ConnManager().get_services() for path, _ in services: serv.append(six.text_type(path[len(SERVICE_PATH):])) return serv def _connected(service): ''' Verify if a connman service is connected ''' state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State') return state == 'online' or state == 'ready' def _space_delimited_list(value): ''' validate that a value contains one or more space-delimited values ''' if isinstance(value, six.string_types): items = value.split(' ') valid = items and all(items) else: valid = hasattr(value, '__iter__') and (value != []) if valid: return True, 'space-delimited string' return False, '{0} is not a valid list.\n'.format(value) def _validate_ipv4(value): ''' validate ipv4 values ''' if len(value) == 3: if not salt.utils.validate.net.ipv4_addr(value[0].strip()): return False, 'Invalid ip address: {0} for ipv4 option'.format(value[0]) if not salt.utils.validate.net.netmask(value[1].strip()): return False, 'Invalid netmask: {0} for ipv4 option'.format(value[1]) if not salt.utils.validate.net.ipv4_addr(value[2].strip()): return False, 'Invalid gateway: {0} for ipv4 option'.format(value[2]) else: return 
False, 'Invalid value: {0} for ipv4 option'.format(value) return True, '' def _interface_to_service(iface): ''' returns the coresponding service to given interface if exists, otherwise return None ''' for _service in _get_services(): service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service)) if service_info.get_property('Ethernet')['Interface'] == iface: return _service return None def _get_service_info(service): ''' return details about given connman service ''' service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) data = { 'label': service, 'wireless': service_info.get_property('Type') == 'wifi', 'connectionid': six.text_type(service_info.get_property('Ethernet')['Interface']), 'hwaddr': six.text_type(service_info.get_property('Ethernet')['Address']) } state = service_info.get_property('State') if state == 'ready' or state == 'online': data['up'] = True data['ipv4'] = { 'gateway': '0.0.0.0' } ipv4 = 'IPv4' if service_info.get_property('IPv4')['Method'] == 'manual': ipv4 += '.Configuration' ipv4_info = service_info.get_property(ipv4) for info in ['Method', 'Address', 'Netmask', 'Gateway']: value = ipv4_info.get(info) if value is None: log.warning('Unable to get IPv4 %s for service %s\n', info, service) continue if info == 'Method': info = 'requestmode' if value == 'dhcp': value = 'dhcp_linklocal' elif value in ('manual', 'fixed'): value = 'static' data['ipv4'][info.lower()] = six.text_type(value) ipv6_info = service_info.get_property('IPv6') for info in ['Address', 'Prefix', 'Gateway']: value = ipv6_info.get(info) if value is None: log.warning('Unable to get IPv6 %s for service %s\n', info, service) continue if 'ipv6' not in data: data['ipv6'] = {} data['ipv6'][info.lower()] = [six.text_type(value)] nameservers = [] for nameserver_prop in service_info.get_property('Nameservers'): nameservers.append(six.text_type(nameserver_prop)) data['ipv4']['dns'] = nameservers else: data['up'] = False data['ipv4'] = { 'requestmode': 
'disabled' } data['ipv4']['supportedrequestmodes'] = [ 'static', 'dhcp_linklocal', 'disabled' ] return data def _get_dns_info(): ''' return dns list ''' dns_list = [] try: with salt.utils.files.fopen('/etc/resolv.conf', 'r+') as dns_info: lines = dns_info.readlines() for line in lines: if 'nameserver' in line: dns = line.split()[1].strip() if dns not in dns_list: dns_list.append(dns) except IOError: log.warning('Could not get domain\n') return dns_list def _remove_quotes(value): ''' Remove leading and trailing double quotes if they exist. ''' # nirtcfg writes values with quotes if len(value) > 1 and value[0] == value[-1] == '\"': value = value[1:-1] return value def _load_config(section, options, default_value='', filename=INI_FILE): ''' Get values for some options and a given section from a config file. :param section: Section Name :param options: List of options :param default_value: Default value if an option doesn't have a value. Default is empty string. :param filename: config file. Default is INI_FILE. 
:return: ''' results = {} if not options: return results with salt.utils.files.fopen(filename, 'r') as config_file: config_parser = configparser.RawConfigParser(dict_type=CaseInsensitiveDict) config_parser.readfp(config_file) for option in options: results[option] = _remove_quotes(config_parser.get(section, option)) \ if config_parser.has_option(section, option) else default_value return results def _get_request_mode_info(interface): ''' return requestmode for given interface ''' settings = _load_config(interface, ['linklocalenabled', 'dhcpenabled'], -1) link_local_enabled = int(settings['linklocalenabled']) dhcp_enabled = int(settings['dhcpenabled']) if dhcp_enabled == 1: return 'dhcp_linklocal' if link_local_enabled == 1 else 'dhcp_only' else: if link_local_enabled == 1: return 'linklocal_only' if link_local_enabled == 0: return 'static' # some versions of nirtcfg don't set the dhcpenabled/linklocalenabled variables # when selecting "DHCP or Link Local" from MAX, so return it by default to avoid # having the requestmode "None" because none of the conditions above matched. return 'dhcp_linklocal' def _get_adapter_mode_info(interface): ''' return adaptermode for given interface ''' mode = _load_config(interface, ['mode'])['mode'].lower() return mode if mode in ['disabled', 'ethercat'] else 'tcpip' def _get_possible_adapter_modes(interface, blacklist): ''' Return possible adapter modes for a given interface using a blacklist. 
:param interface: interface name :param blacklist: given blacklist :return: list of possible adapter modes ''' adapter_modes = [] protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower() sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface)) with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file: uevent_lines = uevent_file.readlines() uevent_devtype = "" for line in uevent_lines: if line.startswith("DEVTYPE="): uevent_devtype = line.split('=')[1].strip() break for adapter_mode in blacklist: if adapter_mode == '_': continue value = blacklist.get(adapter_mode, {}) if value.get('additional_protocol') and adapter_mode not in protocols: continue if interface not in value['name'] \ and not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path) or (blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype) for iface_type in value['type']): adapter_modes += [adapter_mode] return adapter_modes def _get_static_info(interface): ''' Return information about an interface from config file. 
:param interface: interface label ''' data = { 'connectionid': interface.name, 'label': interface.name, 'hwaddr': interface.hwaddr[:-1], 'up': False, 'ipv4': { 'supportedrequestmodes': ['static', 'dhcp_linklocal', 'disabled'], 'requestmode': 'static' }, 'wireless': False } hwaddr_section_number = ''.join(data['hwaddr'].split(':')) if os.path.exists(INTERFACES_CONFIG): information = _load_config(hwaddr_section_number, ['IPv4', 'Nameservers'], filename=INTERFACES_CONFIG) if information['IPv4'] != '': ipv4_information = information['IPv4'].split('/') data['ipv4']['address'] = ipv4_information[0] data['ipv4']['dns'] = information['Nameservers'].split(',') data['ipv4']['netmask'] = ipv4_information[1] data['ipv4']['gateway'] = ipv4_information[2] return data def _get_base_interface_info(interface): ''' return base details about given interface ''' blacklist = { 'tcpip': { 'name': [], 'type': [], 'additional_protocol': False }, 'disabled': { 'name': ['eth0'], 'type': ['gadget'], 'additional_protocol': False }, 'ethercat': { 'name': ['eth0'], 'type': ['gadget', 'usb', 'wlan'], 'additional_protocol': True }, '_': { 'usb': 'sys', 'gadget': 'uevent', 'wlan': 'uevent' } } return { 'label': interface.name, 'connectionid': interface.name, 'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist), 'adapter_mode': _get_adapter_mode_info(interface.name), 'up': interface.flags & IFF_RUNNING != 0, 'ipv4': { 'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'], 'requestmode': _get_request_mode_info(interface.name) }, 'hwaddr': interface.hwaddr[:-1] } def _get_ethercat_interface_info(interface): ''' return details about given ethercat interface ''' base_information = _get_base_interface_info(interface) base_information['ethercat'] = { 'masterid': _load_config(interface.name, ['MasterID'])['MasterID'] } return base_information def _get_tcpip_interface_info(interface): ''' return details about given tcpip interface ''' 
base_information = _get_base_interface_info(interface) if base_information['ipv4']['requestmode'] == 'static': settings = _load_config(interface.name, ['IP_Address', 'Subnet_Mask', 'Gateway', 'DNS_Address']) base_information['ipv4']['address'] = settings['IP_Address'] base_information['ipv4']['netmask'] = settings['Subnet_Mask'] base_information['ipv4']['gateway'] = settings['Gateway'] base_information['ipv4']['dns'] = [settings['DNS_Address']] elif base_information['up']: base_information['ipv4']['address'] = interface.sockaddrToStr(interface.addr) base_information['ipv4']['netmask'] = interface.sockaddrToStr(interface.netmask) base_information['ipv4']['gateway'] = '0.0.0.0' base_information['ipv4']['dns'] = _get_dns_info() with salt.utils.files.fopen('/proc/net/route', 'r') as route_file: pattern = re.compile(r'^{interface}\t[0]{{8}}\t([0-9A-Z]{{8}})'.format(interface=interface.name), re.MULTILINE) match = pattern.search(route_file.read()) iface_gateway_hex = None if not match else match.group(1) if iface_gateway_hex is not None and len(iface_gateway_hex) == 8: base_information['ipv4']['gateway'] = '.'.join([str(int(iface_gateway_hex[i:i + 2], 16)) for i in range(6, -1, -2)]) return base_information def _get_interface_info(interface): ''' return details about given interface ''' adapter_mode = _get_adapter_mode_info(interface.name) if adapter_mode == 'disabled': return _get_base_interface_info(interface) elif adapter_mode == 'ethercat': return _get_ethercat_interface_info(interface) return _get_tcpip_interface_info(interface) def _dict_to_string(dictionary): ''' converts a dictionary object into a list of strings ''' ret = '' for key, val in sorted(dictionary.items()): if isinstance(val, dict): for line in _dict_to_string(val): ret += six.text_type(key) + '-' + line + '\n' elif isinstance(val, list): text = ' '.join([six.text_type(item) for item in val]) ret += six.text_type(key) + ': ' + text + '\n' else: ret += six.text_type(key) + ': ' + six.text_type(val) + 
'\n' return ret.splitlines() def _get_info(interface): ''' Return information about an interface if it's associated with a service. :param interface: interface label ''' service = _interface_to_service(interface.name) return _get_service_info(service) def get_interfaces_details(): ''' Get details about all the interfaces on the minion :return: information about all interfaces omitting loopback :rtype: dictionary CLI Example: .. code-block:: bash salt '*' ip.get_interfaces_details ''' _interfaces = [interface for interface in pyiface.getIfaces() if interface.flags & IFF_LOOPBACK == 0] if __grains__['lsb_distrib_id'] == 'nilrt': return {'interfaces': list(map(_get_interface_info, _interfaces))} # filter just the services _interfaces = [interface for interface in _interfaces if _interface_to_service(interface.name) is not None] return {'interfaces': list(map(_get_info, _interfaces))} def _change_state_legacy(interface, new_state): ''' Enable or disable an interface on a legacy distro Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP' if new_state == 'up' else 'Disabled') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: out = __salt__['cmd.run_all']('ip link set {0} {1}'.format(interface, new_state)) if out['retcode'] != 0: msg = 'Couldn\'t {0} interface {1}. Error: {2}'.format('enable' if new_state == 'up' else 'disable', interface, out['stderr']) raise salt.exceptions.CommandExecutionError(msg) return True def _change_state(interface, new_state): ''' Enable or disable an interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. 
:param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' if __grains__['lsb_distrib_id'] == 'nilrt': return _change_state_legacy(interface, new_state) service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) connected = _connected(service) if (not connected and new_state == 'up') or (connected and new_state == 'down'): service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) try: state = service.connect() if new_state == 'up' else service.disconnect() return state is None except Exception: raise salt.exceptions.CommandExecutionError('Couldn\'t {0} service: {1}\n' .format('enable' if new_state == 'up' else 'disable', service)) return True def up(interface, iface_type=None): # pylint: disable=invalid-name,unused-argument ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.up interface-label ''' return _change_state(interface, 'up') def enable(interface): ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.enable interface-label ''' return up(interface) def down(interface, iface_type=None): # pylint: disable=unused-argument ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. 
:param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.down interface-label ''' return _change_state(interface, 'down') def disable(interface): ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.disable interface-label ''' return down(interface) def _save_config(section, token, value): ''' Helper function to persist a configuration in the ini file ''' cmd = NIRTCFG_PATH cmd += ' --set section={0},token=\'{1}\',value=\'{2}\''.format(section, token, value) if __salt__['cmd.run_all'](cmd)['retcode'] != 0: exc_msg = 'Error: could not set {} to {} for {}\n'.format(token, value, section) raise salt.exceptions.CommandExecutionError(exc_msg) def _restart(interface): ''' Disable and enable an interface ''' disable(interface) enable(interface) def set_dhcp_linklocal_all(interface): ''' Configure specified adapter to use DHCP with linklocal fallback Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.set_dhcp_linklocal_all interface-label ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('dhcp', variant_level=1) ipv4['Address'] = dbus.String('', variant_level=1) ipv4['Netmask'] = dbus.String('', variant_level=1) ipv4['Gateway'] = dbus.String('', variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) service.set_property('Nameservers.Configuration', ['']) # reset nameservers list except Exception as exc: exc_msg = 'Couldn\'t set dhcp linklocal for service: {0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def set_dhcp_only_all(interface): ''' Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.dhcp_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '0') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def set_linklocal_only_all(interface): ''' Configure specified adapter to use linklocal only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.linklocal_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def _configure_static_interface(interface, **settings): ''' Configure an interface that is not detected as a service by Connman (i.e. link is down) :param interface: interface label :param settings: - ip - netmask - gateway - dns - name :return: True if settings were applied successfully. 
:rtype: bool ''' interface = pyiface.Interface(name=interface) parser = configparser.ConfigParser() if os.path.exists(INTERFACES_CONFIG): try: with salt.utils.files.fopen(INTERFACES_CONFIG, 'r') as config_file: parser.readfp(config_file) except configparser.MissingSectionHeaderError: pass hwaddr = interface.hwaddr[:-1] hwaddr_section_number = ''.join(hwaddr.split(':')) if not parser.has_section('interface_{0}'.format(hwaddr_section_number)): parser.add_section('interface_{0}'.format(hwaddr_section_number)) ip_address = settings.get('ip', '0.0.0.0') netmask = settings.get('netmask', '0.0.0.0') gateway = settings.get('gateway', '0.0.0.0') dns_servers = settings.get('dns', '') name = settings.get('name', 'ethernet_cable_{0}'.format(hwaddr_section_number)) parser.set('interface_{0}'.format(hwaddr_section_number), 'IPv4', '{0}/{1}/{2}'. format(ip_address, netmask, gateway)) parser.set('interface_{0}'.format(hwaddr_section_number), 'Nameservers', dns_servers) parser.set('interface_{0}'.format(hwaddr_section_number), 'Name', name) parser.set('interface_{0}'.format(hwaddr_section_number), 'MAC', hwaddr) parser.set('interface_{0}'.format(hwaddr_section_number), 'Type', 'ethernet') with salt.utils.files.fopen(INTERFACES_CONFIG, 'w') as config_file: parser.write(config_file) return True def set_static_all(interface, address, netmask, gateway, nameservers=None): ''' Configure specified adapter to use ipv4 manual settings Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :param str address: ipv4 address :param str netmask: ipv4 netmask :param str gateway: ipv4 gateway :param str nameservers: list of nameservers servers separated by spaces (Optional) :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.set_static_all interface-label address netmask gateway nameservers ''' validate, msg = _validate_ipv4([address, netmask, gateway]) if not validate: raise salt.exceptions.CommandExecutionError(msg) if nameservers: validate, msg = _space_delimited_list(nameservers) if not validate: raise salt.exceptions.CommandExecutionError(msg) if not isinstance(nameservers, list): nameservers = nameservers.split(' ') if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '0') _save_config(interface, 'IP_Address', address) _save_config(interface, 'Subnet_Mask', netmask) _save_config(interface, 'Gateway', gateway) if nameservers: _save_config(interface, 'DNS_Address', nameservers[0]) if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: if interface in pyiface.getIfaces(): return _configure_static_interface(interface, **{'ip': address, 'dns': ','.join(nameservers) if nameservers else '', 'netmask': netmask, 'gateway': gateway}) raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('manual', variant_level=1) ipv4['Address'] = dbus.String('{0}'.format(address), variant_level=1) ipv4['Netmask'] = dbus.String('{0}'.format(netmask), variant_level=1) ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) if nameservers: service.set_property('Nameservers.Configuration', [dbus.String('{0}'.format(d)) for d in nameservers]) except Exception as exc: exc_msg = 'Couldn\'t set manual settings for service: {0}\nError: 
{1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def get_interface(iface): ''' Returns details about given interface. CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0 ''' _interfaces = get_interfaces_details() for _interface in _interfaces['interfaces']: if _interface['connectionid'] == iface: return _dict_to_string(_interface) return None def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if iface_type != 'eth': raise salt.exceptions.CommandExecutionError('Interface type not supported: {0}:'.format(iface_type)) if 'proto' not in settings or settings['proto'] == 'dhcp': # default protocol type used is dhcp set_dhcp_linklocal_all(iface) elif settings['proto'] != 'static': exc_msg = 'Protocol type: {0} is not supported'.format(settings['proto']) raise salt.exceptions.CommandExecutionError(exc_msg) else: address = settings['ipaddr'] netmask = settings['netmask'] gateway = settings['gateway'] dns = [] for key, val in six.iteritems(settings): if 'dns' in key or 'domain' in key: dns += val set_static_all(iface, address, netmask, gateway, dns) if enabled: up(iface) return get_interface(iface) def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') changes = [] if 'networking' in settings: if settings['networking'] in _CONFIG_TRUE: __salt__['service.enable']('connman') else: __salt__['service.disable']('connman') if 'hostname' in settings: new_hostname = settings['hostname'].split('.', 1)[0] settings['hostname'] = new_hostname old_hostname = __salt__['network.get_hostname'] if new_hostname != old_hostname: __salt__['network.mod_hostname'](new_hostname) changes.append('hostname={0}'.format(new_hostname)) return changes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') settings = [] networking = 'no' if _get_state() == 'offline' else 'yes' settings.append('networking={0}'.format(networking)) hostname = __salt__['network.get_hostname'] settings.append('hostname={0}'.format(hostname)) return settings def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' 
) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: stop = __salt__['service.stop']('connman') time.sleep(2) res = stop and __salt__['service.start']('connman') return hostname_res and res
saltstack/salt
salt/modules/nilrt_ip.py
set_dhcp_linklocal_all
python
def set_dhcp_linklocal_all(interface): ''' Configure specified adapter to use DHCP with linklocal fallback Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_dhcp_linklocal_all interface-label ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('dhcp', variant_level=1) ipv4['Address'] = dbus.String('', variant_level=1) ipv4['Netmask'] = dbus.String('', variant_level=1) ipv4['Gateway'] = dbus.String('', variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) service.set_property('Nameservers.Configuration', ['']) # reset nameservers list except Exception as exc: exc_msg = 'Couldn\'t set dhcp linklocal for service: {0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True
Configure specified adapter to use DHCP with linklocal fallback Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_dhcp_linklocal_all interface-label
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nilrt_ip.py#L687-L728
[ "def _interface_to_service(iface):\n '''\n returns the coresponding service to given interface if exists, otherwise return None\n '''\n for _service in _get_services():\n service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service))\n if service_info.get_property('Ethernet')['Interface'] == iface:\n return _service\n return None\n", "def _get_adapter_mode_info(interface):\n '''\n return adaptermode for given interface\n '''\n mode = _load_config(interface, ['mode'])['mode'].lower()\n return mode if mode in ['disabled', 'ethercat'] else 'tcpip'\n", "def _save_config(section, token, value):\n '''\n Helper function to persist a configuration in the ini file\n '''\n cmd = NIRTCFG_PATH\n cmd += ' --set section={0},token=\\'{1}\\',value=\\'{2}\\''.format(section, token, value)\n if __salt__['cmd.run_all'](cmd)['retcode'] != 0:\n exc_msg = 'Error: could not set {} to {} for {}\\n'.format(token, value, section)\n raise salt.exceptions.CommandExecutionError(exc_msg)\n", "def _restart(interface):\n '''\n Disable and enable an interface\n '''\n disable(interface)\n enable(interface)\n" ]
# -*- coding: utf-8 -*- ''' The networking module for NI Linux Real-Time distro ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time import os import re # Import salt libs import salt.exceptions import salt.utils.files import salt.utils.validate.net # Import 3rd-party libs # pylint: disable=import-error,redefined-builtin,no-name-in-module from salt.ext.six.moves import map, range, configparser from salt.ext import six # pylint: enable=import-error,redefined-builtin,no-name-in-module try: import pyconnman except ImportError: pyconnman = None try: import dbus except ImportError: dbus = None try: import pyiface from pyiface.ifreqioctls import IFF_LOOPBACK, IFF_RUNNING except ImportError: pyiface = None try: from requests.structures import CaseInsensitiveDict except ImportError: CaseInsensitiveDict = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'ip' SERVICE_PATH = '/net/connman/service/' INTERFACES_CONFIG = '/var/lib/connman/interfaces.config' NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg' INI_FILE = '/etc/natinst/share/ni-rt.ini' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] NIRTCFG_ETHERCAT = 'EtherCAT' def _assume_condition(condition, err): ''' Raise an exception if the condition is false ''' if not condition: raise RuntimeError(err) def __virtual__(): ''' Confine this module to NI Linux Real-Time based distros ''' try: msg = 'The nilrt_ip module could not be loaded: unsupported OS family' _assume_condition(__grains__['os_family'] == 'NILinuxRT', msg) _assume_condition(CaseInsensitiveDict, 'The python package request is not installed') _assume_condition(pyiface, 'The python pyiface package is not installed') if __grains__['lsb_distrib_id'] != 'nilrt': _assume_condition(pyconnman, 'The python package pyconnman is not installed') _assume_condition(dbus, 'The python DBus package is not installed') _assume_condition(_get_state() != 'offline', 
'Connman is not running') except RuntimeError as exc: return False, str(exc) return __virtualname__ def _get_state(): ''' Returns the state of connman ''' try: return pyconnman.ConnManager().get_property('State') except KeyError: return 'offline' except dbus.DBusException as exc: raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc)) def _get_technologies(): ''' Returns the technologies of connman ''' tech = '' technologies = pyconnman.ConnManager().get_technologies() for path, params in technologies: tech += '{0}\n\tName = {1}\n\tType = {2}\n\tPowered = {3}\n\tConnected = {4}\n'.format( path, params['Name'], params['Type'], params['Powered'] == 1, params['Connected'] == 1) return tech def _get_services(): ''' Returns a list with all connman services ''' serv = [] services = pyconnman.ConnManager().get_services() for path, _ in services: serv.append(six.text_type(path[len(SERVICE_PATH):])) return serv def _connected(service): ''' Verify if a connman service is connected ''' state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State') return state == 'online' or state == 'ready' def _space_delimited_list(value): ''' validate that a value contains one or more space-delimited values ''' if isinstance(value, six.string_types): items = value.split(' ') valid = items and all(items) else: valid = hasattr(value, '__iter__') and (value != []) if valid: return True, 'space-delimited string' return False, '{0} is not a valid list.\n'.format(value) def _validate_ipv4(value): ''' validate ipv4 values ''' if len(value) == 3: if not salt.utils.validate.net.ipv4_addr(value[0].strip()): return False, 'Invalid ip address: {0} for ipv4 option'.format(value[0]) if not salt.utils.validate.net.netmask(value[1].strip()): return False, 'Invalid netmask: {0} for ipv4 option'.format(value[1]) if not salt.utils.validate.net.ipv4_addr(value[2].strip()): return False, 'Invalid gateway: {0} for ipv4 option'.format(value[2]) else: return 
False, 'Invalid value: {0} for ipv4 option'.format(value) return True, '' def _interface_to_service(iface): ''' returns the coresponding service to given interface if exists, otherwise return None ''' for _service in _get_services(): service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service)) if service_info.get_property('Ethernet')['Interface'] == iface: return _service return None def _get_service_info(service): ''' return details about given connman service ''' service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) data = { 'label': service, 'wireless': service_info.get_property('Type') == 'wifi', 'connectionid': six.text_type(service_info.get_property('Ethernet')['Interface']), 'hwaddr': six.text_type(service_info.get_property('Ethernet')['Address']) } state = service_info.get_property('State') if state == 'ready' or state == 'online': data['up'] = True data['ipv4'] = { 'gateway': '0.0.0.0' } ipv4 = 'IPv4' if service_info.get_property('IPv4')['Method'] == 'manual': ipv4 += '.Configuration' ipv4_info = service_info.get_property(ipv4) for info in ['Method', 'Address', 'Netmask', 'Gateway']: value = ipv4_info.get(info) if value is None: log.warning('Unable to get IPv4 %s for service %s\n', info, service) continue if info == 'Method': info = 'requestmode' if value == 'dhcp': value = 'dhcp_linklocal' elif value in ('manual', 'fixed'): value = 'static' data['ipv4'][info.lower()] = six.text_type(value) ipv6_info = service_info.get_property('IPv6') for info in ['Address', 'Prefix', 'Gateway']: value = ipv6_info.get(info) if value is None: log.warning('Unable to get IPv6 %s for service %s\n', info, service) continue if 'ipv6' not in data: data['ipv6'] = {} data['ipv6'][info.lower()] = [six.text_type(value)] nameservers = [] for nameserver_prop in service_info.get_property('Nameservers'): nameservers.append(six.text_type(nameserver_prop)) data['ipv4']['dns'] = nameservers else: data['up'] = False data['ipv4'] = { 'requestmode': 
'disabled' } data['ipv4']['supportedrequestmodes'] = [ 'static', 'dhcp_linklocal', 'disabled' ] return data def _get_dns_info(): ''' return dns list ''' dns_list = [] try: with salt.utils.files.fopen('/etc/resolv.conf', 'r+') as dns_info: lines = dns_info.readlines() for line in lines: if 'nameserver' in line: dns = line.split()[1].strip() if dns not in dns_list: dns_list.append(dns) except IOError: log.warning('Could not get domain\n') return dns_list def _remove_quotes(value): ''' Remove leading and trailing double quotes if they exist. ''' # nirtcfg writes values with quotes if len(value) > 1 and value[0] == value[-1] == '\"': value = value[1:-1] return value def _load_config(section, options, default_value='', filename=INI_FILE): ''' Get values for some options and a given section from a config file. :param section: Section Name :param options: List of options :param default_value: Default value if an option doesn't have a value. Default is empty string. :param filename: config file. Default is INI_FILE. 
:return: ''' results = {} if not options: return results with salt.utils.files.fopen(filename, 'r') as config_file: config_parser = configparser.RawConfigParser(dict_type=CaseInsensitiveDict) config_parser.readfp(config_file) for option in options: results[option] = _remove_quotes(config_parser.get(section, option)) \ if config_parser.has_option(section, option) else default_value return results def _get_request_mode_info(interface): ''' return requestmode for given interface ''' settings = _load_config(interface, ['linklocalenabled', 'dhcpenabled'], -1) link_local_enabled = int(settings['linklocalenabled']) dhcp_enabled = int(settings['dhcpenabled']) if dhcp_enabled == 1: return 'dhcp_linklocal' if link_local_enabled == 1 else 'dhcp_only' else: if link_local_enabled == 1: return 'linklocal_only' if link_local_enabled == 0: return 'static' # some versions of nirtcfg don't set the dhcpenabled/linklocalenabled variables # when selecting "DHCP or Link Local" from MAX, so return it by default to avoid # having the requestmode "None" because none of the conditions above matched. return 'dhcp_linklocal' def _get_adapter_mode_info(interface): ''' return adaptermode for given interface ''' mode = _load_config(interface, ['mode'])['mode'].lower() return mode if mode in ['disabled', 'ethercat'] else 'tcpip' def _get_possible_adapter_modes(interface, blacklist): ''' Return possible adapter modes for a given interface using a blacklist. 
:param interface: interface name :param blacklist: given blacklist :return: list of possible adapter modes ''' adapter_modes = [] protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower() sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface)) with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file: uevent_lines = uevent_file.readlines() uevent_devtype = "" for line in uevent_lines: if line.startswith("DEVTYPE="): uevent_devtype = line.split('=')[1].strip() break for adapter_mode in blacklist: if adapter_mode == '_': continue value = blacklist.get(adapter_mode, {}) if value.get('additional_protocol') and adapter_mode not in protocols: continue if interface not in value['name'] \ and not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path) or (blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype) for iface_type in value['type']): adapter_modes += [adapter_mode] return adapter_modes def _get_static_info(interface): ''' Return information about an interface from config file. 
:param interface: interface label ''' data = { 'connectionid': interface.name, 'label': interface.name, 'hwaddr': interface.hwaddr[:-1], 'up': False, 'ipv4': { 'supportedrequestmodes': ['static', 'dhcp_linklocal', 'disabled'], 'requestmode': 'static' }, 'wireless': False } hwaddr_section_number = ''.join(data['hwaddr'].split(':')) if os.path.exists(INTERFACES_CONFIG): information = _load_config(hwaddr_section_number, ['IPv4', 'Nameservers'], filename=INTERFACES_CONFIG) if information['IPv4'] != '': ipv4_information = information['IPv4'].split('/') data['ipv4']['address'] = ipv4_information[0] data['ipv4']['dns'] = information['Nameservers'].split(',') data['ipv4']['netmask'] = ipv4_information[1] data['ipv4']['gateway'] = ipv4_information[2] return data def _get_base_interface_info(interface): ''' return base details about given interface ''' blacklist = { 'tcpip': { 'name': [], 'type': [], 'additional_protocol': False }, 'disabled': { 'name': ['eth0'], 'type': ['gadget'], 'additional_protocol': False }, 'ethercat': { 'name': ['eth0'], 'type': ['gadget', 'usb', 'wlan'], 'additional_protocol': True }, '_': { 'usb': 'sys', 'gadget': 'uevent', 'wlan': 'uevent' } } return { 'label': interface.name, 'connectionid': interface.name, 'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist), 'adapter_mode': _get_adapter_mode_info(interface.name), 'up': interface.flags & IFF_RUNNING != 0, 'ipv4': { 'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'], 'requestmode': _get_request_mode_info(interface.name) }, 'hwaddr': interface.hwaddr[:-1] } def _get_ethercat_interface_info(interface): ''' return details about given ethercat interface ''' base_information = _get_base_interface_info(interface) base_information['ethercat'] = { 'masterid': _load_config(interface.name, ['MasterID'])['MasterID'] } return base_information def _get_tcpip_interface_info(interface): ''' return details about given tcpip interface ''' 
base_information = _get_base_interface_info(interface) if base_information['ipv4']['requestmode'] == 'static': settings = _load_config(interface.name, ['IP_Address', 'Subnet_Mask', 'Gateway', 'DNS_Address']) base_information['ipv4']['address'] = settings['IP_Address'] base_information['ipv4']['netmask'] = settings['Subnet_Mask'] base_information['ipv4']['gateway'] = settings['Gateway'] base_information['ipv4']['dns'] = [settings['DNS_Address']] elif base_information['up']: base_information['ipv4']['address'] = interface.sockaddrToStr(interface.addr) base_information['ipv4']['netmask'] = interface.sockaddrToStr(interface.netmask) base_information['ipv4']['gateway'] = '0.0.0.0' base_information['ipv4']['dns'] = _get_dns_info() with salt.utils.files.fopen('/proc/net/route', 'r') as route_file: pattern = re.compile(r'^{interface}\t[0]{{8}}\t([0-9A-Z]{{8}})'.format(interface=interface.name), re.MULTILINE) match = pattern.search(route_file.read()) iface_gateway_hex = None if not match else match.group(1) if iface_gateway_hex is not None and len(iface_gateway_hex) == 8: base_information['ipv4']['gateway'] = '.'.join([str(int(iface_gateway_hex[i:i + 2], 16)) for i in range(6, -1, -2)]) return base_information def _get_interface_info(interface): ''' return details about given interface ''' adapter_mode = _get_adapter_mode_info(interface.name) if adapter_mode == 'disabled': return _get_base_interface_info(interface) elif adapter_mode == 'ethercat': return _get_ethercat_interface_info(interface) return _get_tcpip_interface_info(interface) def _dict_to_string(dictionary): ''' converts a dictionary object into a list of strings ''' ret = '' for key, val in sorted(dictionary.items()): if isinstance(val, dict): for line in _dict_to_string(val): ret += six.text_type(key) + '-' + line + '\n' elif isinstance(val, list): text = ' '.join([six.text_type(item) for item in val]) ret += six.text_type(key) + ': ' + text + '\n' else: ret += six.text_type(key) + ': ' + six.text_type(val) + 
'\n' return ret.splitlines() def _get_info(interface): ''' Return information about an interface if it's associated with a service. :param interface: interface label ''' service = _interface_to_service(interface.name) return _get_service_info(service) def get_interfaces_details(): ''' Get details about all the interfaces on the minion :return: information about all interfaces omitting loopback :rtype: dictionary CLI Example: .. code-block:: bash salt '*' ip.get_interfaces_details ''' _interfaces = [interface for interface in pyiface.getIfaces() if interface.flags & IFF_LOOPBACK == 0] if __grains__['lsb_distrib_id'] == 'nilrt': return {'interfaces': list(map(_get_interface_info, _interfaces))} # filter just the services _interfaces = [interface for interface in _interfaces if _interface_to_service(interface.name) is not None] return {'interfaces': list(map(_get_info, _interfaces))} def _change_state_legacy(interface, new_state): ''' Enable or disable an interface on a legacy distro Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP' if new_state == 'up' else 'Disabled') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: out = __salt__['cmd.run_all']('ip link set {0} {1}'.format(interface, new_state)) if out['retcode'] != 0: msg = 'Couldn\'t {0} interface {1}. Error: {2}'.format('enable' if new_state == 'up' else 'disable', interface, out['stderr']) raise salt.exceptions.CommandExecutionError(msg) return True def _change_state(interface, new_state): ''' Enable or disable an interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. 
:param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' if __grains__['lsb_distrib_id'] == 'nilrt': return _change_state_legacy(interface, new_state) service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) connected = _connected(service) if (not connected and new_state == 'up') or (connected and new_state == 'down'): service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) try: state = service.connect() if new_state == 'up' else service.disconnect() return state is None except Exception: raise salt.exceptions.CommandExecutionError('Couldn\'t {0} service: {1}\n' .format('enable' if new_state == 'up' else 'disable', service)) return True def up(interface, iface_type=None): # pylint: disable=invalid-name,unused-argument ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.up interface-label ''' return _change_state(interface, 'up') def enable(interface): ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.enable interface-label ''' return up(interface) def down(interface, iface_type=None): # pylint: disable=unused-argument ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. 
:param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.down interface-label ''' return _change_state(interface, 'down') def disable(interface): ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.disable interface-label ''' return down(interface) def _save_config(section, token, value): ''' Helper function to persist a configuration in the ini file ''' cmd = NIRTCFG_PATH cmd += ' --set section={0},token=\'{1}\',value=\'{2}\''.format(section, token, value) if __salt__['cmd.run_all'](cmd)['retcode'] != 0: exc_msg = 'Error: could not set {} to {} for {}\n'.format(token, value, section) raise salt.exceptions.CommandExecutionError(exc_msg) def set_ethercat(interface, master_id): ''' Configure specified adapter to use EtherCAT adapter mode. If successful, the target will need reboot if it doesn't already use EtherCAT adapter mode, otherwise will return true. :param interface: interface label :param master_id: EtherCAT Master ID :return: True if the settings were applied, otherwise an exception will be thrown. CLI Example: .. 
code-block:: bash salt '*' ip.set_ethercat interface-label master-id ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', NIRTCFG_ETHERCAT) _save_config(interface, 'MasterID', master_id) if initial_mode != 'ethercat': __salt__['system.set_reboot_required_witnessed']() return True raise salt.exceptions.CommandExecutionError('EtherCAT is not supported') def _restart(interface): ''' Disable and enable an interface ''' disable(interface) enable(interface) def set_dhcp_only_all(interface): ''' Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.dhcp_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '0') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def set_linklocal_only_all(interface): ''' Configure specified adapter to use linklocal only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.linklocal_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def _configure_static_interface(interface, **settings): ''' Configure an interface that is not detected as a service by Connman (i.e. link is down) :param interface: interface label :param settings: - ip - netmask - gateway - dns - name :return: True if settings were applied successfully. :rtype: bool ''' interface = pyiface.Interface(name=interface) parser = configparser.ConfigParser() if os.path.exists(INTERFACES_CONFIG): try: with salt.utils.files.fopen(INTERFACES_CONFIG, 'r') as config_file: parser.readfp(config_file) except configparser.MissingSectionHeaderError: pass hwaddr = interface.hwaddr[:-1] hwaddr_section_number = ''.join(hwaddr.split(':')) if not parser.has_section('interface_{0}'.format(hwaddr_section_number)): parser.add_section('interface_{0}'.format(hwaddr_section_number)) ip_address = settings.get('ip', '0.0.0.0') netmask = settings.get('netmask', '0.0.0.0') gateway = settings.get('gateway', '0.0.0.0') dns_servers = settings.get('dns', '') name = settings.get('name', 'ethernet_cable_{0}'.format(hwaddr_section_number)) parser.set('interface_{0}'.format(hwaddr_section_number), 'IPv4', '{0}/{1}/{2}'. 
format(ip_address, netmask, gateway)) parser.set('interface_{0}'.format(hwaddr_section_number), 'Nameservers', dns_servers) parser.set('interface_{0}'.format(hwaddr_section_number), 'Name', name) parser.set('interface_{0}'.format(hwaddr_section_number), 'MAC', hwaddr) parser.set('interface_{0}'.format(hwaddr_section_number), 'Type', 'ethernet') with salt.utils.files.fopen(INTERFACES_CONFIG, 'w') as config_file: parser.write(config_file) return True def set_static_all(interface, address, netmask, gateway, nameservers=None): ''' Configure specified adapter to use ipv4 manual settings Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :param str address: ipv4 address :param str netmask: ipv4 netmask :param str gateway: ipv4 gateway :param str nameservers: list of nameservers servers separated by spaces (Optional) :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.set_static_all interface-label address netmask gateway nameservers ''' validate, msg = _validate_ipv4([address, netmask, gateway]) if not validate: raise salt.exceptions.CommandExecutionError(msg) if nameservers: validate, msg = _space_delimited_list(nameservers) if not validate: raise salt.exceptions.CommandExecutionError(msg) if not isinstance(nameservers, list): nameservers = nameservers.split(' ') if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '0') _save_config(interface, 'IP_Address', address) _save_config(interface, 'Subnet_Mask', netmask) _save_config(interface, 'Gateway', gateway) if nameservers: _save_config(interface, 'DNS_Address', nameservers[0]) if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: if interface in pyiface.getIfaces(): return _configure_static_interface(interface, **{'ip': address, 'dns': ','.join(nameservers) if nameservers else '', 'netmask': netmask, 'gateway': gateway}) raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('manual', variant_level=1) ipv4['Address'] = dbus.String('{0}'.format(address), variant_level=1) ipv4['Netmask'] = dbus.String('{0}'.format(netmask), variant_level=1) ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) if nameservers: service.set_property('Nameservers.Configuration', [dbus.String('{0}'.format(d)) for d in nameservers]) except Exception as exc: exc_msg = 'Couldn\'t set manual settings for service: {0}\nError: 
{1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def get_interface(iface): ''' Returns details about given interface. CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0 ''' _interfaces = get_interfaces_details() for _interface in _interfaces['interfaces']: if _interface['connectionid'] == iface: return _dict_to_string(_interface) return None def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if iface_type != 'eth': raise salt.exceptions.CommandExecutionError('Interface type not supported: {0}:'.format(iface_type)) if 'proto' not in settings or settings['proto'] == 'dhcp': # default protocol type used is dhcp set_dhcp_linklocal_all(iface) elif settings['proto'] != 'static': exc_msg = 'Protocol type: {0} is not supported'.format(settings['proto']) raise salt.exceptions.CommandExecutionError(exc_msg) else: address = settings['ipaddr'] netmask = settings['netmask'] gateway = settings['gateway'] dns = [] for key, val in six.iteritems(settings): if 'dns' in key or 'domain' in key: dns += val set_static_all(iface, address, netmask, gateway, dns) if enabled: up(iface) return get_interface(iface) def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') changes = [] if 'networking' in settings: if settings['networking'] in _CONFIG_TRUE: __salt__['service.enable']('connman') else: __salt__['service.disable']('connman') if 'hostname' in settings: new_hostname = settings['hostname'].split('.', 1)[0] settings['hostname'] = new_hostname old_hostname = __salt__['network.get_hostname'] if new_hostname != old_hostname: __salt__['network.mod_hostname'](new_hostname) changes.append('hostname={0}'.format(new_hostname)) return changes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') settings = [] networking = 'no' if _get_state() == 'offline' else 'yes' settings.append('networking={0}'.format(networking)) hostname = __salt__['network.get_hostname'] settings.append('hostname={0}'.format(hostname)) return settings def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' 
) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: stop = __salt__['service.stop']('connman') time.sleep(2) res = stop and __salt__['service.start']('connman') return hostname_res and res
saltstack/salt
salt/modules/nilrt_ip.py
set_dhcp_only_all
python
def set_dhcp_only_all(interface): ''' Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.dhcp_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '0') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True
Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.dhcp_only_all interface-label
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nilrt_ip.py#L731-L757
[ "def _get_adapter_mode_info(interface):\n '''\n return adaptermode for given interface\n '''\n mode = _load_config(interface, ['mode'])['mode'].lower()\n return mode if mode in ['disabled', 'ethercat'] else 'tcpip'\n", "def _save_config(section, token, value):\n '''\n Helper function to persist a configuration in the ini file\n '''\n cmd = NIRTCFG_PATH\n cmd += ' --set section={0},token=\\'{1}\\',value=\\'{2}\\''.format(section, token, value)\n if __salt__['cmd.run_all'](cmd)['retcode'] != 0:\n exc_msg = 'Error: could not set {} to {} for {}\\n'.format(token, value, section)\n raise salt.exceptions.CommandExecutionError(exc_msg)\n", "def _restart(interface):\n '''\n Disable and enable an interface\n '''\n disable(interface)\n enable(interface)\n" ]
# -*- coding: utf-8 -*- ''' The networking module for NI Linux Real-Time distro ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time import os import re # Import salt libs import salt.exceptions import salt.utils.files import salt.utils.validate.net # Import 3rd-party libs # pylint: disable=import-error,redefined-builtin,no-name-in-module from salt.ext.six.moves import map, range, configparser from salt.ext import six # pylint: enable=import-error,redefined-builtin,no-name-in-module try: import pyconnman except ImportError: pyconnman = None try: import dbus except ImportError: dbus = None try: import pyiface from pyiface.ifreqioctls import IFF_LOOPBACK, IFF_RUNNING except ImportError: pyiface = None try: from requests.structures import CaseInsensitiveDict except ImportError: CaseInsensitiveDict = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'ip' SERVICE_PATH = '/net/connman/service/' INTERFACES_CONFIG = '/var/lib/connman/interfaces.config' NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg' INI_FILE = '/etc/natinst/share/ni-rt.ini' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] NIRTCFG_ETHERCAT = 'EtherCAT' def _assume_condition(condition, err): ''' Raise an exception if the condition is false ''' if not condition: raise RuntimeError(err) def __virtual__(): ''' Confine this module to NI Linux Real-Time based distros ''' try: msg = 'The nilrt_ip module could not be loaded: unsupported OS family' _assume_condition(__grains__['os_family'] == 'NILinuxRT', msg) _assume_condition(CaseInsensitiveDict, 'The python package request is not installed') _assume_condition(pyiface, 'The python pyiface package is not installed') if __grains__['lsb_distrib_id'] != 'nilrt': _assume_condition(pyconnman, 'The python package pyconnman is not installed') _assume_condition(dbus, 'The python DBus package is not installed') _assume_condition(_get_state() != 'offline', 
'Connman is not running') except RuntimeError as exc: return False, str(exc) return __virtualname__ def _get_state(): ''' Returns the state of connman ''' try: return pyconnman.ConnManager().get_property('State') except KeyError: return 'offline' except dbus.DBusException as exc: raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc)) def _get_technologies(): ''' Returns the technologies of connman ''' tech = '' technologies = pyconnman.ConnManager().get_technologies() for path, params in technologies: tech += '{0}\n\tName = {1}\n\tType = {2}\n\tPowered = {3}\n\tConnected = {4}\n'.format( path, params['Name'], params['Type'], params['Powered'] == 1, params['Connected'] == 1) return tech def _get_services(): ''' Returns a list with all connman services ''' serv = [] services = pyconnman.ConnManager().get_services() for path, _ in services: serv.append(six.text_type(path[len(SERVICE_PATH):])) return serv def _connected(service): ''' Verify if a connman service is connected ''' state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State') return state == 'online' or state == 'ready' def _space_delimited_list(value): ''' validate that a value contains one or more space-delimited values ''' if isinstance(value, six.string_types): items = value.split(' ') valid = items and all(items) else: valid = hasattr(value, '__iter__') and (value != []) if valid: return True, 'space-delimited string' return False, '{0} is not a valid list.\n'.format(value) def _validate_ipv4(value): ''' validate ipv4 values ''' if len(value) == 3: if not salt.utils.validate.net.ipv4_addr(value[0].strip()): return False, 'Invalid ip address: {0} for ipv4 option'.format(value[0]) if not salt.utils.validate.net.netmask(value[1].strip()): return False, 'Invalid netmask: {0} for ipv4 option'.format(value[1]) if not salt.utils.validate.net.ipv4_addr(value[2].strip()): return False, 'Invalid gateway: {0} for ipv4 option'.format(value[2]) else: return 
False, 'Invalid value: {0} for ipv4 option'.format(value) return True, '' def _interface_to_service(iface): ''' returns the coresponding service to given interface if exists, otherwise return None ''' for _service in _get_services(): service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service)) if service_info.get_property('Ethernet')['Interface'] == iface: return _service return None def _get_service_info(service): ''' return details about given connman service ''' service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) data = { 'label': service, 'wireless': service_info.get_property('Type') == 'wifi', 'connectionid': six.text_type(service_info.get_property('Ethernet')['Interface']), 'hwaddr': six.text_type(service_info.get_property('Ethernet')['Address']) } state = service_info.get_property('State') if state == 'ready' or state == 'online': data['up'] = True data['ipv4'] = { 'gateway': '0.0.0.0' } ipv4 = 'IPv4' if service_info.get_property('IPv4')['Method'] == 'manual': ipv4 += '.Configuration' ipv4_info = service_info.get_property(ipv4) for info in ['Method', 'Address', 'Netmask', 'Gateway']: value = ipv4_info.get(info) if value is None: log.warning('Unable to get IPv4 %s for service %s\n', info, service) continue if info == 'Method': info = 'requestmode' if value == 'dhcp': value = 'dhcp_linklocal' elif value in ('manual', 'fixed'): value = 'static' data['ipv4'][info.lower()] = six.text_type(value) ipv6_info = service_info.get_property('IPv6') for info in ['Address', 'Prefix', 'Gateway']: value = ipv6_info.get(info) if value is None: log.warning('Unable to get IPv6 %s for service %s\n', info, service) continue if 'ipv6' not in data: data['ipv6'] = {} data['ipv6'][info.lower()] = [six.text_type(value)] nameservers = [] for nameserver_prop in service_info.get_property('Nameservers'): nameservers.append(six.text_type(nameserver_prop)) data['ipv4']['dns'] = nameservers else: data['up'] = False data['ipv4'] = { 'requestmode': 
'disabled' } data['ipv4']['supportedrequestmodes'] = [ 'static', 'dhcp_linklocal', 'disabled' ] return data def _get_dns_info(): ''' return dns list ''' dns_list = [] try: with salt.utils.files.fopen('/etc/resolv.conf', 'r+') as dns_info: lines = dns_info.readlines() for line in lines: if 'nameserver' in line: dns = line.split()[1].strip() if dns not in dns_list: dns_list.append(dns) except IOError: log.warning('Could not get domain\n') return dns_list def _remove_quotes(value): ''' Remove leading and trailing double quotes if they exist. ''' # nirtcfg writes values with quotes if len(value) > 1 and value[0] == value[-1] == '\"': value = value[1:-1] return value def _load_config(section, options, default_value='', filename=INI_FILE): ''' Get values for some options and a given section from a config file. :param section: Section Name :param options: List of options :param default_value: Default value if an option doesn't have a value. Default is empty string. :param filename: config file. Default is INI_FILE. 
:return: ''' results = {} if not options: return results with salt.utils.files.fopen(filename, 'r') as config_file: config_parser = configparser.RawConfigParser(dict_type=CaseInsensitiveDict) config_parser.readfp(config_file) for option in options: results[option] = _remove_quotes(config_parser.get(section, option)) \ if config_parser.has_option(section, option) else default_value return results def _get_request_mode_info(interface): ''' return requestmode for given interface ''' settings = _load_config(interface, ['linklocalenabled', 'dhcpenabled'], -1) link_local_enabled = int(settings['linklocalenabled']) dhcp_enabled = int(settings['dhcpenabled']) if dhcp_enabled == 1: return 'dhcp_linklocal' if link_local_enabled == 1 else 'dhcp_only' else: if link_local_enabled == 1: return 'linklocal_only' if link_local_enabled == 0: return 'static' # some versions of nirtcfg don't set the dhcpenabled/linklocalenabled variables # when selecting "DHCP or Link Local" from MAX, so return it by default to avoid # having the requestmode "None" because none of the conditions above matched. return 'dhcp_linklocal' def _get_adapter_mode_info(interface): ''' return adaptermode for given interface ''' mode = _load_config(interface, ['mode'])['mode'].lower() return mode if mode in ['disabled', 'ethercat'] else 'tcpip' def _get_possible_adapter_modes(interface, blacklist): ''' Return possible adapter modes for a given interface using a blacklist. 
:param interface: interface name :param blacklist: given blacklist :return: list of possible adapter modes ''' adapter_modes = [] protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower() sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface)) with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file: uevent_lines = uevent_file.readlines() uevent_devtype = "" for line in uevent_lines: if line.startswith("DEVTYPE="): uevent_devtype = line.split('=')[1].strip() break for adapter_mode in blacklist: if adapter_mode == '_': continue value = blacklist.get(adapter_mode, {}) if value.get('additional_protocol') and adapter_mode not in protocols: continue if interface not in value['name'] \ and not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path) or (blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype) for iface_type in value['type']): adapter_modes += [adapter_mode] return adapter_modes def _get_static_info(interface): ''' Return information about an interface from config file. 
:param interface: interface label ''' data = { 'connectionid': interface.name, 'label': interface.name, 'hwaddr': interface.hwaddr[:-1], 'up': False, 'ipv4': { 'supportedrequestmodes': ['static', 'dhcp_linklocal', 'disabled'], 'requestmode': 'static' }, 'wireless': False } hwaddr_section_number = ''.join(data['hwaddr'].split(':')) if os.path.exists(INTERFACES_CONFIG): information = _load_config(hwaddr_section_number, ['IPv4', 'Nameservers'], filename=INTERFACES_CONFIG) if information['IPv4'] != '': ipv4_information = information['IPv4'].split('/') data['ipv4']['address'] = ipv4_information[0] data['ipv4']['dns'] = information['Nameservers'].split(',') data['ipv4']['netmask'] = ipv4_information[1] data['ipv4']['gateway'] = ipv4_information[2] return data def _get_base_interface_info(interface): ''' return base details about given interface ''' blacklist = { 'tcpip': { 'name': [], 'type': [], 'additional_protocol': False }, 'disabled': { 'name': ['eth0'], 'type': ['gadget'], 'additional_protocol': False }, 'ethercat': { 'name': ['eth0'], 'type': ['gadget', 'usb', 'wlan'], 'additional_protocol': True }, '_': { 'usb': 'sys', 'gadget': 'uevent', 'wlan': 'uevent' } } return { 'label': interface.name, 'connectionid': interface.name, 'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist), 'adapter_mode': _get_adapter_mode_info(interface.name), 'up': interface.flags & IFF_RUNNING != 0, 'ipv4': { 'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'], 'requestmode': _get_request_mode_info(interface.name) }, 'hwaddr': interface.hwaddr[:-1] } def _get_ethercat_interface_info(interface): ''' return details about given ethercat interface ''' base_information = _get_base_interface_info(interface) base_information['ethercat'] = { 'masterid': _load_config(interface.name, ['MasterID'])['MasterID'] } return base_information def _get_tcpip_interface_info(interface): ''' return details about given tcpip interface ''' 
base_information = _get_base_interface_info(interface) if base_information['ipv4']['requestmode'] == 'static': settings = _load_config(interface.name, ['IP_Address', 'Subnet_Mask', 'Gateway', 'DNS_Address']) base_information['ipv4']['address'] = settings['IP_Address'] base_information['ipv4']['netmask'] = settings['Subnet_Mask'] base_information['ipv4']['gateway'] = settings['Gateway'] base_information['ipv4']['dns'] = [settings['DNS_Address']] elif base_information['up']: base_information['ipv4']['address'] = interface.sockaddrToStr(interface.addr) base_information['ipv4']['netmask'] = interface.sockaddrToStr(interface.netmask) base_information['ipv4']['gateway'] = '0.0.0.0' base_information['ipv4']['dns'] = _get_dns_info() with salt.utils.files.fopen('/proc/net/route', 'r') as route_file: pattern = re.compile(r'^{interface}\t[0]{{8}}\t([0-9A-Z]{{8}})'.format(interface=interface.name), re.MULTILINE) match = pattern.search(route_file.read()) iface_gateway_hex = None if not match else match.group(1) if iface_gateway_hex is not None and len(iface_gateway_hex) == 8: base_information['ipv4']['gateway'] = '.'.join([str(int(iface_gateway_hex[i:i + 2], 16)) for i in range(6, -1, -2)]) return base_information def _get_interface_info(interface): ''' return details about given interface ''' adapter_mode = _get_adapter_mode_info(interface.name) if adapter_mode == 'disabled': return _get_base_interface_info(interface) elif adapter_mode == 'ethercat': return _get_ethercat_interface_info(interface) return _get_tcpip_interface_info(interface) def _dict_to_string(dictionary): ''' converts a dictionary object into a list of strings ''' ret = '' for key, val in sorted(dictionary.items()): if isinstance(val, dict): for line in _dict_to_string(val): ret += six.text_type(key) + '-' + line + '\n' elif isinstance(val, list): text = ' '.join([six.text_type(item) for item in val]) ret += six.text_type(key) + ': ' + text + '\n' else: ret += six.text_type(key) + ': ' + six.text_type(val) + 
'\n' return ret.splitlines() def _get_info(interface): ''' Return information about an interface if it's associated with a service. :param interface: interface label ''' service = _interface_to_service(interface.name) return _get_service_info(service) def get_interfaces_details(): ''' Get details about all the interfaces on the minion :return: information about all interfaces omitting loopback :rtype: dictionary CLI Example: .. code-block:: bash salt '*' ip.get_interfaces_details ''' _interfaces = [interface for interface in pyiface.getIfaces() if interface.flags & IFF_LOOPBACK == 0] if __grains__['lsb_distrib_id'] == 'nilrt': return {'interfaces': list(map(_get_interface_info, _interfaces))} # filter just the services _interfaces = [interface for interface in _interfaces if _interface_to_service(interface.name) is not None] return {'interfaces': list(map(_get_info, _interfaces))} def _change_state_legacy(interface, new_state): ''' Enable or disable an interface on a legacy distro Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP' if new_state == 'up' else 'Disabled') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: out = __salt__['cmd.run_all']('ip link set {0} {1}'.format(interface, new_state)) if out['retcode'] != 0: msg = 'Couldn\'t {0} interface {1}. Error: {2}'.format('enable' if new_state == 'up' else 'disable', interface, out['stderr']) raise salt.exceptions.CommandExecutionError(msg) return True def _change_state(interface, new_state): ''' Enable or disable an interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. 
:param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' if __grains__['lsb_distrib_id'] == 'nilrt': return _change_state_legacy(interface, new_state) service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) connected = _connected(service) if (not connected and new_state == 'up') or (connected and new_state == 'down'): service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) try: state = service.connect() if new_state == 'up' else service.disconnect() return state is None except Exception: raise salt.exceptions.CommandExecutionError('Couldn\'t {0} service: {1}\n' .format('enable' if new_state == 'up' else 'disable', service)) return True def up(interface, iface_type=None): # pylint: disable=invalid-name,unused-argument ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.up interface-label ''' return _change_state(interface, 'up') def enable(interface): ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.enable interface-label ''' return up(interface) def down(interface, iface_type=None): # pylint: disable=unused-argument ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. 
:param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.down interface-label ''' return _change_state(interface, 'down') def disable(interface): ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.disable interface-label ''' return down(interface) def _save_config(section, token, value): ''' Helper function to persist a configuration in the ini file ''' cmd = NIRTCFG_PATH cmd += ' --set section={0},token=\'{1}\',value=\'{2}\''.format(section, token, value) if __salt__['cmd.run_all'](cmd)['retcode'] != 0: exc_msg = 'Error: could not set {} to {} for {}\n'.format(token, value, section) raise salt.exceptions.CommandExecutionError(exc_msg) def set_ethercat(interface, master_id): ''' Configure specified adapter to use EtherCAT adapter mode. If successful, the target will need reboot if it doesn't already use EtherCAT adapter mode, otherwise will return true. :param interface: interface label :param master_id: EtherCAT Master ID :return: True if the settings were applied, otherwise an exception will be thrown. CLI Example: .. 
code-block:: bash salt '*' ip.set_ethercat interface-label master-id ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', NIRTCFG_ETHERCAT) _save_config(interface, 'MasterID', master_id) if initial_mode != 'ethercat': __salt__['system.set_reboot_required_witnessed']() return True raise salt.exceptions.CommandExecutionError('EtherCAT is not supported') def _restart(interface): ''' Disable and enable an interface ''' disable(interface) enable(interface) def set_dhcp_linklocal_all(interface): ''' Configure specified adapter to use DHCP with linklocal fallback Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_dhcp_linklocal_all interface-label ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('dhcp', variant_level=1) ipv4['Address'] = dbus.String('', variant_level=1) ipv4['Netmask'] = dbus.String('', variant_level=1) ipv4['Gateway'] = dbus.String('', variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) service.set_property('Nameservers.Configuration', ['']) # reset nameservers list except Exception as exc: exc_msg = 'Couldn\'t set dhcp linklocal for service: 
{0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def set_linklocal_only_all(interface): ''' Configure specified adapter to use linklocal only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.linklocal_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def _configure_static_interface(interface, **settings): ''' Configure an interface that is not detected as a service by Connman (i.e. link is down) :param interface: interface label :param settings: - ip - netmask - gateway - dns - name :return: True if settings were applied successfully. 
:rtype: bool ''' interface = pyiface.Interface(name=interface) parser = configparser.ConfigParser() if os.path.exists(INTERFACES_CONFIG): try: with salt.utils.files.fopen(INTERFACES_CONFIG, 'r') as config_file: parser.readfp(config_file) except configparser.MissingSectionHeaderError: pass hwaddr = interface.hwaddr[:-1] hwaddr_section_number = ''.join(hwaddr.split(':')) if not parser.has_section('interface_{0}'.format(hwaddr_section_number)): parser.add_section('interface_{0}'.format(hwaddr_section_number)) ip_address = settings.get('ip', '0.0.0.0') netmask = settings.get('netmask', '0.0.0.0') gateway = settings.get('gateway', '0.0.0.0') dns_servers = settings.get('dns', '') name = settings.get('name', 'ethernet_cable_{0}'.format(hwaddr_section_number)) parser.set('interface_{0}'.format(hwaddr_section_number), 'IPv4', '{0}/{1}/{2}'. format(ip_address, netmask, gateway)) parser.set('interface_{0}'.format(hwaddr_section_number), 'Nameservers', dns_servers) parser.set('interface_{0}'.format(hwaddr_section_number), 'Name', name) parser.set('interface_{0}'.format(hwaddr_section_number), 'MAC', hwaddr) parser.set('interface_{0}'.format(hwaddr_section_number), 'Type', 'ethernet') with salt.utils.files.fopen(INTERFACES_CONFIG, 'w') as config_file: parser.write(config_file) return True def set_static_all(interface, address, netmask, gateway, nameservers=None): ''' Configure specified adapter to use ipv4 manual settings Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :param str address: ipv4 address :param str netmask: ipv4 netmask :param str gateway: ipv4 gateway :param str nameservers: list of nameservers servers separated by spaces (Optional) :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' ip.set_static_all interface-label address netmask gateway nameservers ''' validate, msg = _validate_ipv4([address, netmask, gateway]) if not validate: raise salt.exceptions.CommandExecutionError(msg) if nameservers: validate, msg = _space_delimited_list(nameservers) if not validate: raise salt.exceptions.CommandExecutionError(msg) if not isinstance(nameservers, list): nameservers = nameservers.split(' ') if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '0') _save_config(interface, 'IP_Address', address) _save_config(interface, 'Subnet_Mask', netmask) _save_config(interface, 'Gateway', gateway) if nameservers: _save_config(interface, 'DNS_Address', nameservers[0]) if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: if interface in pyiface.getIfaces(): return _configure_static_interface(interface, **{'ip': address, 'dns': ','.join(nameservers) if nameservers else '', 'netmask': netmask, 'gateway': gateway}) raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('manual', variant_level=1) ipv4['Address'] = dbus.String('{0}'.format(address), variant_level=1) ipv4['Netmask'] = dbus.String('{0}'.format(netmask), variant_level=1) ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) if nameservers: service.set_property('Nameservers.Configuration', [dbus.String('{0}'.format(d)) for d in nameservers]) except Exception as exc: exc_msg = 'Couldn\'t set manual settings for service: {0}\nError: 
{1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def get_interface(iface): ''' Returns details about given interface. CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0 ''' _interfaces = get_interfaces_details() for _interface in _interfaces['interfaces']: if _interface['connectionid'] == iface: return _dict_to_string(_interface) return None def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if iface_type != 'eth': raise salt.exceptions.CommandExecutionError('Interface type not supported: {0}:'.format(iface_type)) if 'proto' not in settings or settings['proto'] == 'dhcp': # default protocol type used is dhcp set_dhcp_linklocal_all(iface) elif settings['proto'] != 'static': exc_msg = 'Protocol type: {0} is not supported'.format(settings['proto']) raise salt.exceptions.CommandExecutionError(exc_msg) else: address = settings['ipaddr'] netmask = settings['netmask'] gateway = settings['gateway'] dns = [] for key, val in six.iteritems(settings): if 'dns' in key or 'domain' in key: dns += val set_static_all(iface, address, netmask, gateway, dns) if enabled: up(iface) return get_interface(iface) def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') changes = [] if 'networking' in settings: if settings['networking'] in _CONFIG_TRUE: __salt__['service.enable']('connman') else: __salt__['service.disable']('connman') if 'hostname' in settings: new_hostname = settings['hostname'].split('.', 1)[0] settings['hostname'] = new_hostname old_hostname = __salt__['network.get_hostname'] if new_hostname != old_hostname: __salt__['network.mod_hostname'](new_hostname) changes.append('hostname={0}'.format(new_hostname)) return changes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') settings = [] networking = 'no' if _get_state() == 'offline' else 'yes' settings.append('networking={0}'.format(networking)) hostname = __salt__['network.get_hostname'] settings.append('hostname={0}'.format(hostname)) return settings def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' 
) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: stop = __salt__['service.stop']('connman') time.sleep(2) res = stop and __salt__['service.start']('connman') return hostname_res and res
saltstack/salt
salt/modules/nilrt_ip.py
_configure_static_interface
python
def _configure_static_interface(interface, **settings):
    '''
    Configure an interface that is not detected as a service by Connman
    (i.e. link is down)

    :param interface: interface label
    :param settings:
        - ip
        - netmask
        - gateway
        - dns
        - name
    :return: True if settings were applied successfully.
    :rtype: bool
    '''
    iface = pyiface.Interface(name=interface)
    config = configparser.ConfigParser()
    # Merge into the existing interfaces.config when one is present; a file
    # lacking a section header is treated the same as no file at all.
    if os.path.exists(INTERFACES_CONFIG):
        try:
            with salt.utils.files.fopen(INTERFACES_CONFIG, 'r') as config_file:
                config.readfp(config_file)
        except configparser.MissingSectionHeaderError:
            pass
    # interface.hwaddr carries a trailing separator; drop it, then build the
    # per-interface section name from the colon-free MAC.
    hwaddr = iface.hwaddr[:-1]
    hwaddr_section_number = ''.join(hwaddr.split(':'))
    section = 'interface_{0}'.format(hwaddr_section_number)
    if not config.has_section(section):
        config.add_section(section)
    ip_address = settings.get('ip', '0.0.0.0')
    netmask = settings.get('netmask', '0.0.0.0')
    gateway = settings.get('gateway', '0.0.0.0')
    # Connman reads IPv4 as a single "address/netmask/gateway" triple.
    options = (
        ('IPv4', '{0}/{1}/{2}'.format(ip_address, netmask, gateway)),
        ('Nameservers', settings.get('dns', '')),
        ('Name', settings.get('name', 'ethernet_cable_{0}'.format(hwaddr_section_number))),
        ('MAC', hwaddr),
        ('Type', 'ethernet'),
    )
    for option, value in options:
        config.set(section, option, value)
    with salt.utils.files.fopen(INTERFACES_CONFIG, 'w') as config_file:
        config.write(config_file)
    return True
Configure an interface that is not detected as a service by Connman (i.e. link is down) :param interface: interface label :param settings: - ip - netmask - gateway - dns - name :return: True if settings were applied successfully. :rtype: bool
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nilrt_ip.py#L789-L828
null
# -*- coding: utf-8 -*- ''' The networking module for NI Linux Real-Time distro ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time import os import re # Import salt libs import salt.exceptions import salt.utils.files import salt.utils.validate.net # Import 3rd-party libs # pylint: disable=import-error,redefined-builtin,no-name-in-module from salt.ext.six.moves import map, range, configparser from salt.ext import six # pylint: enable=import-error,redefined-builtin,no-name-in-module try: import pyconnman except ImportError: pyconnman = None try: import dbus except ImportError: dbus = None try: import pyiface from pyiface.ifreqioctls import IFF_LOOPBACK, IFF_RUNNING except ImportError: pyiface = None try: from requests.structures import CaseInsensitiveDict except ImportError: CaseInsensitiveDict = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'ip' SERVICE_PATH = '/net/connman/service/' INTERFACES_CONFIG = '/var/lib/connman/interfaces.config' NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg' INI_FILE = '/etc/natinst/share/ni-rt.ini' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] NIRTCFG_ETHERCAT = 'EtherCAT' def _assume_condition(condition, err): ''' Raise an exception if the condition is false ''' if not condition: raise RuntimeError(err) def __virtual__(): ''' Confine this module to NI Linux Real-Time based distros ''' try: msg = 'The nilrt_ip module could not be loaded: unsupported OS family' _assume_condition(__grains__['os_family'] == 'NILinuxRT', msg) _assume_condition(CaseInsensitiveDict, 'The python package request is not installed') _assume_condition(pyiface, 'The python pyiface package is not installed') if __grains__['lsb_distrib_id'] != 'nilrt': _assume_condition(pyconnman, 'The python package pyconnman is not installed') _assume_condition(dbus, 'The python DBus package is not installed') _assume_condition(_get_state() != 'offline', 
'Connman is not running') except RuntimeError as exc: return False, str(exc) return __virtualname__ def _get_state(): ''' Returns the state of connman ''' try: return pyconnman.ConnManager().get_property('State') except KeyError: return 'offline' except dbus.DBusException as exc: raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc)) def _get_technologies(): ''' Returns the technologies of connman ''' tech = '' technologies = pyconnman.ConnManager().get_technologies() for path, params in technologies: tech += '{0}\n\tName = {1}\n\tType = {2}\n\tPowered = {3}\n\tConnected = {4}\n'.format( path, params['Name'], params['Type'], params['Powered'] == 1, params['Connected'] == 1) return tech def _get_services(): ''' Returns a list with all connman services ''' serv = [] services = pyconnman.ConnManager().get_services() for path, _ in services: serv.append(six.text_type(path[len(SERVICE_PATH):])) return serv def _connected(service): ''' Verify if a connman service is connected ''' state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State') return state == 'online' or state == 'ready' def _space_delimited_list(value): ''' validate that a value contains one or more space-delimited values ''' if isinstance(value, six.string_types): items = value.split(' ') valid = items and all(items) else: valid = hasattr(value, '__iter__') and (value != []) if valid: return True, 'space-delimited string' return False, '{0} is not a valid list.\n'.format(value) def _validate_ipv4(value): ''' validate ipv4 values ''' if len(value) == 3: if not salt.utils.validate.net.ipv4_addr(value[0].strip()): return False, 'Invalid ip address: {0} for ipv4 option'.format(value[0]) if not salt.utils.validate.net.netmask(value[1].strip()): return False, 'Invalid netmask: {0} for ipv4 option'.format(value[1]) if not salt.utils.validate.net.ipv4_addr(value[2].strip()): return False, 'Invalid gateway: {0} for ipv4 option'.format(value[2]) else: return 
False, 'Invalid value: {0} for ipv4 option'.format(value) return True, '' def _interface_to_service(iface): ''' returns the coresponding service to given interface if exists, otherwise return None ''' for _service in _get_services(): service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service)) if service_info.get_property('Ethernet')['Interface'] == iface: return _service return None def _get_service_info(service): ''' return details about given connman service ''' service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) data = { 'label': service, 'wireless': service_info.get_property('Type') == 'wifi', 'connectionid': six.text_type(service_info.get_property('Ethernet')['Interface']), 'hwaddr': six.text_type(service_info.get_property('Ethernet')['Address']) } state = service_info.get_property('State') if state == 'ready' or state == 'online': data['up'] = True data['ipv4'] = { 'gateway': '0.0.0.0' } ipv4 = 'IPv4' if service_info.get_property('IPv4')['Method'] == 'manual': ipv4 += '.Configuration' ipv4_info = service_info.get_property(ipv4) for info in ['Method', 'Address', 'Netmask', 'Gateway']: value = ipv4_info.get(info) if value is None: log.warning('Unable to get IPv4 %s for service %s\n', info, service) continue if info == 'Method': info = 'requestmode' if value == 'dhcp': value = 'dhcp_linklocal' elif value in ('manual', 'fixed'): value = 'static' data['ipv4'][info.lower()] = six.text_type(value) ipv6_info = service_info.get_property('IPv6') for info in ['Address', 'Prefix', 'Gateway']: value = ipv6_info.get(info) if value is None: log.warning('Unable to get IPv6 %s for service %s\n', info, service) continue if 'ipv6' not in data: data['ipv6'] = {} data['ipv6'][info.lower()] = [six.text_type(value)] nameservers = [] for nameserver_prop in service_info.get_property('Nameservers'): nameservers.append(six.text_type(nameserver_prop)) data['ipv4']['dns'] = nameservers else: data['up'] = False data['ipv4'] = { 'requestmode': 
'disabled' } data['ipv4']['supportedrequestmodes'] = [ 'static', 'dhcp_linklocal', 'disabled' ] return data def _get_dns_info(): ''' return dns list ''' dns_list = [] try: with salt.utils.files.fopen('/etc/resolv.conf', 'r+') as dns_info: lines = dns_info.readlines() for line in lines: if 'nameserver' in line: dns = line.split()[1].strip() if dns not in dns_list: dns_list.append(dns) except IOError: log.warning('Could not get domain\n') return dns_list def _remove_quotes(value): ''' Remove leading and trailing double quotes if they exist. ''' # nirtcfg writes values with quotes if len(value) > 1 and value[0] == value[-1] == '\"': value = value[1:-1] return value def _load_config(section, options, default_value='', filename=INI_FILE): ''' Get values for some options and a given section from a config file. :param section: Section Name :param options: List of options :param default_value: Default value if an option doesn't have a value. Default is empty string. :param filename: config file. Default is INI_FILE. 
:return: ''' results = {} if not options: return results with salt.utils.files.fopen(filename, 'r') as config_file: config_parser = configparser.RawConfigParser(dict_type=CaseInsensitiveDict) config_parser.readfp(config_file) for option in options: results[option] = _remove_quotes(config_parser.get(section, option)) \ if config_parser.has_option(section, option) else default_value return results def _get_request_mode_info(interface): ''' return requestmode for given interface ''' settings = _load_config(interface, ['linklocalenabled', 'dhcpenabled'], -1) link_local_enabled = int(settings['linklocalenabled']) dhcp_enabled = int(settings['dhcpenabled']) if dhcp_enabled == 1: return 'dhcp_linklocal' if link_local_enabled == 1 else 'dhcp_only' else: if link_local_enabled == 1: return 'linklocal_only' if link_local_enabled == 0: return 'static' # some versions of nirtcfg don't set the dhcpenabled/linklocalenabled variables # when selecting "DHCP or Link Local" from MAX, so return it by default to avoid # having the requestmode "None" because none of the conditions above matched. return 'dhcp_linklocal' def _get_adapter_mode_info(interface): ''' return adaptermode for given interface ''' mode = _load_config(interface, ['mode'])['mode'].lower() return mode if mode in ['disabled', 'ethercat'] else 'tcpip' def _get_possible_adapter_modes(interface, blacklist): ''' Return possible adapter modes for a given interface using a blacklist. 
:param interface: interface name :param blacklist: given blacklist :return: list of possible adapter modes ''' adapter_modes = [] protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower() sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface)) with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file: uevent_lines = uevent_file.readlines() uevent_devtype = "" for line in uevent_lines: if line.startswith("DEVTYPE="): uevent_devtype = line.split('=')[1].strip() break for adapter_mode in blacklist: if adapter_mode == '_': continue value = blacklist.get(adapter_mode, {}) if value.get('additional_protocol') and adapter_mode not in protocols: continue if interface not in value['name'] \ and not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path) or (blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype) for iface_type in value['type']): adapter_modes += [adapter_mode] return adapter_modes def _get_static_info(interface): ''' Return information about an interface from config file. 
:param interface: interface label ''' data = { 'connectionid': interface.name, 'label': interface.name, 'hwaddr': interface.hwaddr[:-1], 'up': False, 'ipv4': { 'supportedrequestmodes': ['static', 'dhcp_linklocal', 'disabled'], 'requestmode': 'static' }, 'wireless': False } hwaddr_section_number = ''.join(data['hwaddr'].split(':')) if os.path.exists(INTERFACES_CONFIG): information = _load_config(hwaddr_section_number, ['IPv4', 'Nameservers'], filename=INTERFACES_CONFIG) if information['IPv4'] != '': ipv4_information = information['IPv4'].split('/') data['ipv4']['address'] = ipv4_information[0] data['ipv4']['dns'] = information['Nameservers'].split(',') data['ipv4']['netmask'] = ipv4_information[1] data['ipv4']['gateway'] = ipv4_information[2] return data def _get_base_interface_info(interface): ''' return base details about given interface ''' blacklist = { 'tcpip': { 'name': [], 'type': [], 'additional_protocol': False }, 'disabled': { 'name': ['eth0'], 'type': ['gadget'], 'additional_protocol': False }, 'ethercat': { 'name': ['eth0'], 'type': ['gadget', 'usb', 'wlan'], 'additional_protocol': True }, '_': { 'usb': 'sys', 'gadget': 'uevent', 'wlan': 'uevent' } } return { 'label': interface.name, 'connectionid': interface.name, 'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist), 'adapter_mode': _get_adapter_mode_info(interface.name), 'up': interface.flags & IFF_RUNNING != 0, 'ipv4': { 'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'], 'requestmode': _get_request_mode_info(interface.name) }, 'hwaddr': interface.hwaddr[:-1] } def _get_ethercat_interface_info(interface): ''' return details about given ethercat interface ''' base_information = _get_base_interface_info(interface) base_information['ethercat'] = { 'masterid': _load_config(interface.name, ['MasterID'])['MasterID'] } return base_information def _get_tcpip_interface_info(interface): ''' return details about given tcpip interface ''' 
base_information = _get_base_interface_info(interface) if base_information['ipv4']['requestmode'] == 'static': settings = _load_config(interface.name, ['IP_Address', 'Subnet_Mask', 'Gateway', 'DNS_Address']) base_information['ipv4']['address'] = settings['IP_Address'] base_information['ipv4']['netmask'] = settings['Subnet_Mask'] base_information['ipv4']['gateway'] = settings['Gateway'] base_information['ipv4']['dns'] = [settings['DNS_Address']] elif base_information['up']: base_information['ipv4']['address'] = interface.sockaddrToStr(interface.addr) base_information['ipv4']['netmask'] = interface.sockaddrToStr(interface.netmask) base_information['ipv4']['gateway'] = '0.0.0.0' base_information['ipv4']['dns'] = _get_dns_info() with salt.utils.files.fopen('/proc/net/route', 'r') as route_file: pattern = re.compile(r'^{interface}\t[0]{{8}}\t([0-9A-Z]{{8}})'.format(interface=interface.name), re.MULTILINE) match = pattern.search(route_file.read()) iface_gateway_hex = None if not match else match.group(1) if iface_gateway_hex is not None and len(iface_gateway_hex) == 8: base_information['ipv4']['gateway'] = '.'.join([str(int(iface_gateway_hex[i:i + 2], 16)) for i in range(6, -1, -2)]) return base_information def _get_interface_info(interface): ''' return details about given interface ''' adapter_mode = _get_adapter_mode_info(interface.name) if adapter_mode == 'disabled': return _get_base_interface_info(interface) elif adapter_mode == 'ethercat': return _get_ethercat_interface_info(interface) return _get_tcpip_interface_info(interface) def _dict_to_string(dictionary): ''' converts a dictionary object into a list of strings ''' ret = '' for key, val in sorted(dictionary.items()): if isinstance(val, dict): for line in _dict_to_string(val): ret += six.text_type(key) + '-' + line + '\n' elif isinstance(val, list): text = ' '.join([six.text_type(item) for item in val]) ret += six.text_type(key) + ': ' + text + '\n' else: ret += six.text_type(key) + ': ' + six.text_type(val) + 
'\n' return ret.splitlines() def _get_info(interface): ''' Return information about an interface if it's associated with a service. :param interface: interface label ''' service = _interface_to_service(interface.name) return _get_service_info(service) def get_interfaces_details(): ''' Get details about all the interfaces on the minion :return: information about all interfaces omitting loopback :rtype: dictionary CLI Example: .. code-block:: bash salt '*' ip.get_interfaces_details ''' _interfaces = [interface for interface in pyiface.getIfaces() if interface.flags & IFF_LOOPBACK == 0] if __grains__['lsb_distrib_id'] == 'nilrt': return {'interfaces': list(map(_get_interface_info, _interfaces))} # filter just the services _interfaces = [interface for interface in _interfaces if _interface_to_service(interface.name) is not None] return {'interfaces': list(map(_get_info, _interfaces))} def _change_state_legacy(interface, new_state): ''' Enable or disable an interface on a legacy distro Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP' if new_state == 'up' else 'Disabled') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: out = __salt__['cmd.run_all']('ip link set {0} {1}'.format(interface, new_state)) if out['retcode'] != 0: msg = 'Couldn\'t {0} interface {1}. Error: {2}'.format('enable' if new_state == 'up' else 'disable', interface, out['stderr']) raise salt.exceptions.CommandExecutionError(msg) return True def _change_state(interface, new_state): ''' Enable or disable an interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. 
:param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' if __grains__['lsb_distrib_id'] == 'nilrt': return _change_state_legacy(interface, new_state) service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) connected = _connected(service) if (not connected and new_state == 'up') or (connected and new_state == 'down'): service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) try: state = service.connect() if new_state == 'up' else service.disconnect() return state is None except Exception: raise salt.exceptions.CommandExecutionError('Couldn\'t {0} service: {1}\n' .format('enable' if new_state == 'up' else 'disable', service)) return True def up(interface, iface_type=None): # pylint: disable=invalid-name,unused-argument ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.up interface-label ''' return _change_state(interface, 'up') def enable(interface): ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.enable interface-label ''' return up(interface) def down(interface, iface_type=None): # pylint: disable=unused-argument ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. 
:param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.down interface-label ''' return _change_state(interface, 'down') def disable(interface): ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.disable interface-label ''' return down(interface) def _save_config(section, token, value): ''' Helper function to persist a configuration in the ini file ''' cmd = NIRTCFG_PATH cmd += ' --set section={0},token=\'{1}\',value=\'{2}\''.format(section, token, value) if __salt__['cmd.run_all'](cmd)['retcode'] != 0: exc_msg = 'Error: could not set {} to {} for {}\n'.format(token, value, section) raise salt.exceptions.CommandExecutionError(exc_msg) def set_ethercat(interface, master_id): ''' Configure specified adapter to use EtherCAT adapter mode. If successful, the target will need reboot if it doesn't already use EtherCAT adapter mode, otherwise will return true. :param interface: interface label :param master_id: EtherCAT Master ID :return: True if the settings were applied, otherwise an exception will be thrown. CLI Example: .. 
code-block:: bash salt '*' ip.set_ethercat interface-label master-id ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', NIRTCFG_ETHERCAT) _save_config(interface, 'MasterID', master_id) if initial_mode != 'ethercat': __salt__['system.set_reboot_required_witnessed']() return True raise salt.exceptions.CommandExecutionError('EtherCAT is not supported') def _restart(interface): ''' Disable and enable an interface ''' disable(interface) enable(interface) def set_dhcp_linklocal_all(interface): ''' Configure specified adapter to use DHCP with linklocal fallback Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_dhcp_linklocal_all interface-label ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('dhcp', variant_level=1) ipv4['Address'] = dbus.String('', variant_level=1) ipv4['Netmask'] = dbus.String('', variant_level=1) ipv4['Gateway'] = dbus.String('', variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) service.set_property('Nameservers.Configuration', ['']) # reset nameservers list except Exception as exc: exc_msg = 'Couldn\'t set dhcp linklocal for service: 
{0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def set_dhcp_only_all(interface): ''' Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.dhcp_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '0') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def set_linklocal_only_all(interface): ''' Configure specified adapter to use linklocal only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.linklocal_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def set_static_all(interface, address, netmask, gateway, nameservers=None): ''' Configure specified adapter to use ipv4 manual settings Change adapter mode to TCP/IP. 
If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :param str address: ipv4 address :param str netmask: ipv4 netmask :param str gateway: ipv4 gateway :param str nameservers: list of nameservers servers separated by spaces (Optional) :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_static_all interface-label address netmask gateway nameservers ''' validate, msg = _validate_ipv4([address, netmask, gateway]) if not validate: raise salt.exceptions.CommandExecutionError(msg) if nameservers: validate, msg = _space_delimited_list(nameservers) if not validate: raise salt.exceptions.CommandExecutionError(msg) if not isinstance(nameservers, list): nameservers = nameservers.split(' ') if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '0') _save_config(interface, 'IP_Address', address) _save_config(interface, 'Subnet_Mask', netmask) _save_config(interface, 'Gateway', gateway) if nameservers: _save_config(interface, 'DNS_Address', nameservers[0]) if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: if interface in pyiface.getIfaces(): return _configure_static_interface(interface, **{'ip': address, 'dns': ','.join(nameservers) if nameservers else '', 'netmask': netmask, 'gateway': gateway}) raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('manual', variant_level=1) ipv4['Address'] = dbus.String('{0}'.format(address), variant_level=1) ipv4['Netmask'] 
= dbus.String('{0}'.format(netmask), variant_level=1) ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) if nameservers: service.set_property('Nameservers.Configuration', [dbus.String('{0}'.format(d)) for d in nameservers]) except Exception as exc: exc_msg = 'Couldn\'t set manual settings for service: {0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def get_interface(iface): ''' Returns details about given interface. CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0 ''' _interfaces = get_interfaces_details() for _interface in _interfaces['interfaces']: if _interface['connectionid'] == iface: return _dict_to_string(_interface) return None def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if iface_type != 'eth': raise salt.exceptions.CommandExecutionError('Interface type not supported: {0}:'.format(iface_type)) if 'proto' not in settings or settings['proto'] == 'dhcp': # default protocol type used is dhcp set_dhcp_linklocal_all(iface) elif settings['proto'] != 'static': exc_msg = 'Protocol type: {0} is not supported'.format(settings['proto']) raise salt.exceptions.CommandExecutionError(exc_msg) else: address = settings['ipaddr'] netmask = settings['netmask'] gateway = settings['gateway'] dns = [] for key, val in six.iteritems(settings): if 'dns' in key or 'domain' in key: dns += val set_static_all(iface, address, netmask, gateway, dns) if enabled: up(iface) return get_interface(iface) def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') changes = [] if 'networking' in settings: if settings['networking'] in _CONFIG_TRUE: __salt__['service.enable']('connman') else: __salt__['service.disable']('connman') if 'hostname' in settings: new_hostname = settings['hostname'].split('.', 1)[0] settings['hostname'] = new_hostname old_hostname = __salt__['network.get_hostname'] if new_hostname != old_hostname: __salt__['network.mod_hostname'](new_hostname) changes.append('hostname={0}'.format(new_hostname)) return changes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') settings = [] networking = 'no' if _get_state() == 'offline' else 'yes' settings.append('networking={0}'.format(networking)) hostname = __salt__['network.get_hostname'] settings.append('hostname={0}'.format(hostname)) return settings def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' 
) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: stop = __salt__['service.stop']('connman') time.sleep(2) res = stop and __salt__['service.start']('connman') return hostname_res and res
saltstack/salt
salt/modules/nilrt_ip.py
set_static_all
python
def set_static_all(interface, address, netmask, gateway, nameservers=None):
    '''
    Configure specified adapter to use ipv4 manual settings

    Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target
    will need reboot.

    :param str interface: interface label
    :param str address: ipv4 address
    :param str netmask: ipv4 netmask
    :param str gateway: ipv4 gateway
    :param str nameservers: list of nameservers servers separated by spaces (Optional)
    :return: True if the settings were applied, otherwise an exception will be thrown.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' ip.set_static_all interface-label address netmask gateway nameservers
    '''
    # Validate the addresses up front so we fail before touching any config.
    validate, msg = _validate_ipv4([address, netmask, gateway])
    if not validate:
        raise salt.exceptions.CommandExecutionError(msg)
    if nameservers:
        validate, msg = _space_delimited_list(nameservers)
        if not validate:
            raise salt.exceptions.CommandExecutionError(msg)
        if not isinstance(nameservers, list):
            nameservers = nameservers.split(' ')
    if __grains__['lsb_distrib_id'] == 'nilrt':
        # Legacy distro: persist settings through nirtcfg instead of connman.
        initial_mode = _get_adapter_mode_info(interface)
        _save_config(interface, 'Mode', 'TCPIP')
        _save_config(interface, 'dhcpenabled', '0')
        _save_config(interface, 'linklocalenabled', '0')
        _save_config(interface, 'IP_Address', address)
        _save_config(interface, 'Subnet_Mask', netmask)
        _save_config(interface, 'Gateway', gateway)
        if nameservers:
            # nirtcfg stores a single DNS entry; extra servers are dropped.
            _save_config(interface, 'DNS_Address', nameservers[0])
        if initial_mode == 'ethercat':
            # Leaving EtherCAT mode takes effect only after a reboot.
            __salt__['system.set_reboot_required_witnessed']()
        else:
            _restart(interface)
        return True
    service = _interface_to_service(interface)
    if not service:
        # No connman service exists (e.g. the link is down); write the static
        # settings into connman's interfaces.config directly.
        # FIX: pyiface.getIfaces() yields Interface objects, so a plain
        # membership test against the string label never matched; compare
        # against the interface *names* instead.
        if interface in [iface.name for iface in pyiface.getIfaces()]:
            return _configure_static_interface(
                interface,
                **{'ip': address,
                   'dns': ','.join(nameservers) if nameservers else '',
                   'netmask': netmask,
                   'gateway': gateway})
        raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface))
    # Connman path: push the manual IPv4 configuration over D-Bus.
    service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service))
    ipv4 = service.get_property('IPv4.Configuration')
    ipv4['Method'] = dbus.String('manual', variant_level=1)
    ipv4['Address'] = dbus.String('{0}'.format(address), variant_level=1)
    ipv4['Netmask'] = dbus.String('{0}'.format(netmask), variant_level=1)
    ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1)
    try:
        service.set_property('IPv4.Configuration', ipv4)
        if nameservers:
            service.set_property('Nameservers.Configuration',
                                 [dbus.String('{0}'.format(d)) for d in nameservers])
    except Exception as exc:
        exc_msg = 'Couldn\'t set manual settings for service: {0}\nError: {1}\n'.format(service, exc)
        raise salt.exceptions.CommandExecutionError(exc_msg)
    return True
Configure specified adapter to use ipv4 manual settings Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :param str address: ipv4 address :param str netmask: ipv4 netmask :param str gateway: ipv4 gateway :param str nameservers: list of nameserver addresses separated by spaces (Optional) :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_static_all interface-label address netmask gateway nameservers
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nilrt_ip.py#L831-L895
[ "def _space_delimited_list(value):\n '''\n validate that a value contains one or more space-delimited values\n '''\n if isinstance(value, six.string_types):\n items = value.split(' ')\n valid = items and all(items)\n else:\n valid = hasattr(value, '__iter__') and (value != [])\n\n if valid:\n return True, 'space-delimited string'\n return False, '{0} is not a valid list.\\n'.format(value)\n", "def _validate_ipv4(value):\n '''\n validate ipv4 values\n '''\n if len(value) == 3:\n if not salt.utils.validate.net.ipv4_addr(value[0].strip()):\n return False, 'Invalid ip address: {0} for ipv4 option'.format(value[0])\n if not salt.utils.validate.net.netmask(value[1].strip()):\n return False, 'Invalid netmask: {0} for ipv4 option'.format(value[1])\n if not salt.utils.validate.net.ipv4_addr(value[2].strip()):\n return False, 'Invalid gateway: {0} for ipv4 option'.format(value[2])\n else:\n return False, 'Invalid value: {0} for ipv4 option'.format(value)\n return True, ''\n", "def _interface_to_service(iface):\n '''\n returns the coresponding service to given interface if exists, otherwise return None\n '''\n for _service in _get_services():\n service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service))\n if service_info.get_property('Ethernet')['Interface'] == iface:\n return _service\n return None\n", "def _get_adapter_mode_info(interface):\n '''\n return adaptermode for given interface\n '''\n mode = _load_config(interface, ['mode'])['mode'].lower()\n return mode if mode in ['disabled', 'ethercat'] else 'tcpip'\n", "def _save_config(section, token, value):\n '''\n Helper function to persist a configuration in the ini file\n '''\n cmd = NIRTCFG_PATH\n cmd += ' --set section={0},token=\\'{1}\\',value=\\'{2}\\''.format(section, token, value)\n if __salt__['cmd.run_all'](cmd)['retcode'] != 0:\n exc_msg = 'Error: could not set {} to {} for {}\\n'.format(token, value, section)\n raise salt.exceptions.CommandExecutionError(exc_msg)\n", "def _restart(interface):\n 
'''\n Disable and enable an interface\n '''\n disable(interface)\n enable(interface)\n", "def _configure_static_interface(interface, **settings):\n '''\n Configure an interface that is not detected as a service by Connman (i.e. link is down)\n\n :param interface: interface label\n :param settings:\n - ip\n - netmask\n - gateway\n - dns\n - name\n :return: True if settings were applied successfully.\n :rtype: bool\n '''\n interface = pyiface.Interface(name=interface)\n parser = configparser.ConfigParser()\n if os.path.exists(INTERFACES_CONFIG):\n try:\n with salt.utils.files.fopen(INTERFACES_CONFIG, 'r') as config_file:\n parser.readfp(config_file)\n except configparser.MissingSectionHeaderError:\n pass\n hwaddr = interface.hwaddr[:-1]\n hwaddr_section_number = ''.join(hwaddr.split(':'))\n if not parser.has_section('interface_{0}'.format(hwaddr_section_number)):\n parser.add_section('interface_{0}'.format(hwaddr_section_number))\n ip_address = settings.get('ip', '0.0.0.0')\n netmask = settings.get('netmask', '0.0.0.0')\n gateway = settings.get('gateway', '0.0.0.0')\n dns_servers = settings.get('dns', '')\n name = settings.get('name', 'ethernet_cable_{0}'.format(hwaddr_section_number))\n parser.set('interface_{0}'.format(hwaddr_section_number), 'IPv4', '{0}/{1}/{2}'.\n format(ip_address, netmask, gateway))\n parser.set('interface_{0}'.format(hwaddr_section_number), 'Nameservers', dns_servers)\n parser.set('interface_{0}'.format(hwaddr_section_number), 'Name', name)\n parser.set('interface_{0}'.format(hwaddr_section_number), 'MAC', hwaddr)\n parser.set('interface_{0}'.format(hwaddr_section_number), 'Type', 'ethernet')\n with salt.utils.files.fopen(INTERFACES_CONFIG, 'w') as config_file:\n parser.write(config_file)\n return True\n" ]
# -*- coding: utf-8 -*- ''' The networking module for NI Linux Real-Time distro ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time import os import re # Import salt libs import salt.exceptions import salt.utils.files import salt.utils.validate.net # Import 3rd-party libs # pylint: disable=import-error,redefined-builtin,no-name-in-module from salt.ext.six.moves import map, range, configparser from salt.ext import six # pylint: enable=import-error,redefined-builtin,no-name-in-module try: import pyconnman except ImportError: pyconnman = None try: import dbus except ImportError: dbus = None try: import pyiface from pyiface.ifreqioctls import IFF_LOOPBACK, IFF_RUNNING except ImportError: pyiface = None try: from requests.structures import CaseInsensitiveDict except ImportError: CaseInsensitiveDict = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'ip' SERVICE_PATH = '/net/connman/service/' INTERFACES_CONFIG = '/var/lib/connman/interfaces.config' NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg' INI_FILE = '/etc/natinst/share/ni-rt.ini' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] NIRTCFG_ETHERCAT = 'EtherCAT' def _assume_condition(condition, err): ''' Raise an exception if the condition is false ''' if not condition: raise RuntimeError(err) def __virtual__(): ''' Confine this module to NI Linux Real-Time based distros ''' try: msg = 'The nilrt_ip module could not be loaded: unsupported OS family' _assume_condition(__grains__['os_family'] == 'NILinuxRT', msg) _assume_condition(CaseInsensitiveDict, 'The python package request is not installed') _assume_condition(pyiface, 'The python pyiface package is not installed') if __grains__['lsb_distrib_id'] != 'nilrt': _assume_condition(pyconnman, 'The python package pyconnman is not installed') _assume_condition(dbus, 'The python DBus package is not installed') _assume_condition(_get_state() != 'offline', 
'Connman is not running') except RuntimeError as exc: return False, str(exc) return __virtualname__ def _get_state(): ''' Returns the state of connman ''' try: return pyconnman.ConnManager().get_property('State') except KeyError: return 'offline' except dbus.DBusException as exc: raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc)) def _get_technologies(): ''' Returns the technologies of connman ''' tech = '' technologies = pyconnman.ConnManager().get_technologies() for path, params in technologies: tech += '{0}\n\tName = {1}\n\tType = {2}\n\tPowered = {3}\n\tConnected = {4}\n'.format( path, params['Name'], params['Type'], params['Powered'] == 1, params['Connected'] == 1) return tech def _get_services(): ''' Returns a list with all connman services ''' serv = [] services = pyconnman.ConnManager().get_services() for path, _ in services: serv.append(six.text_type(path[len(SERVICE_PATH):])) return serv def _connected(service): ''' Verify if a connman service is connected ''' state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State') return state == 'online' or state == 'ready' def _space_delimited_list(value): ''' validate that a value contains one or more space-delimited values ''' if isinstance(value, six.string_types): items = value.split(' ') valid = items and all(items) else: valid = hasattr(value, '__iter__') and (value != []) if valid: return True, 'space-delimited string' return False, '{0} is not a valid list.\n'.format(value) def _validate_ipv4(value): ''' validate ipv4 values ''' if len(value) == 3: if not salt.utils.validate.net.ipv4_addr(value[0].strip()): return False, 'Invalid ip address: {0} for ipv4 option'.format(value[0]) if not salt.utils.validate.net.netmask(value[1].strip()): return False, 'Invalid netmask: {0} for ipv4 option'.format(value[1]) if not salt.utils.validate.net.ipv4_addr(value[2].strip()): return False, 'Invalid gateway: {0} for ipv4 option'.format(value[2]) else: return 
False, 'Invalid value: {0} for ipv4 option'.format(value) return True, '' def _interface_to_service(iface): ''' returns the coresponding service to given interface if exists, otherwise return None ''' for _service in _get_services(): service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service)) if service_info.get_property('Ethernet')['Interface'] == iface: return _service return None def _get_service_info(service): ''' return details about given connman service ''' service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) data = { 'label': service, 'wireless': service_info.get_property('Type') == 'wifi', 'connectionid': six.text_type(service_info.get_property('Ethernet')['Interface']), 'hwaddr': six.text_type(service_info.get_property('Ethernet')['Address']) } state = service_info.get_property('State') if state == 'ready' or state == 'online': data['up'] = True data['ipv4'] = { 'gateway': '0.0.0.0' } ipv4 = 'IPv4' if service_info.get_property('IPv4')['Method'] == 'manual': ipv4 += '.Configuration' ipv4_info = service_info.get_property(ipv4) for info in ['Method', 'Address', 'Netmask', 'Gateway']: value = ipv4_info.get(info) if value is None: log.warning('Unable to get IPv4 %s for service %s\n', info, service) continue if info == 'Method': info = 'requestmode' if value == 'dhcp': value = 'dhcp_linklocal' elif value in ('manual', 'fixed'): value = 'static' data['ipv4'][info.lower()] = six.text_type(value) ipv6_info = service_info.get_property('IPv6') for info in ['Address', 'Prefix', 'Gateway']: value = ipv6_info.get(info) if value is None: log.warning('Unable to get IPv6 %s for service %s\n', info, service) continue if 'ipv6' not in data: data['ipv6'] = {} data['ipv6'][info.lower()] = [six.text_type(value)] nameservers = [] for nameserver_prop in service_info.get_property('Nameservers'): nameservers.append(six.text_type(nameserver_prop)) data['ipv4']['dns'] = nameservers else: data['up'] = False data['ipv4'] = { 'requestmode': 
'disabled' } data['ipv4']['supportedrequestmodes'] = [ 'static', 'dhcp_linklocal', 'disabled' ] return data def _get_dns_info(): ''' return dns list ''' dns_list = [] try: with salt.utils.files.fopen('/etc/resolv.conf', 'r+') as dns_info: lines = dns_info.readlines() for line in lines: if 'nameserver' in line: dns = line.split()[1].strip() if dns not in dns_list: dns_list.append(dns) except IOError: log.warning('Could not get domain\n') return dns_list def _remove_quotes(value): ''' Remove leading and trailing double quotes if they exist. ''' # nirtcfg writes values with quotes if len(value) > 1 and value[0] == value[-1] == '\"': value = value[1:-1] return value def _load_config(section, options, default_value='', filename=INI_FILE): ''' Get values for some options and a given section from a config file. :param section: Section Name :param options: List of options :param default_value: Default value if an option doesn't have a value. Default is empty string. :param filename: config file. Default is INI_FILE. 
:return: ''' results = {} if not options: return results with salt.utils.files.fopen(filename, 'r') as config_file: config_parser = configparser.RawConfigParser(dict_type=CaseInsensitiveDict) config_parser.readfp(config_file) for option in options: results[option] = _remove_quotes(config_parser.get(section, option)) \ if config_parser.has_option(section, option) else default_value return results def _get_request_mode_info(interface): ''' return requestmode for given interface ''' settings = _load_config(interface, ['linklocalenabled', 'dhcpenabled'], -1) link_local_enabled = int(settings['linklocalenabled']) dhcp_enabled = int(settings['dhcpenabled']) if dhcp_enabled == 1: return 'dhcp_linklocal' if link_local_enabled == 1 else 'dhcp_only' else: if link_local_enabled == 1: return 'linklocal_only' if link_local_enabled == 0: return 'static' # some versions of nirtcfg don't set the dhcpenabled/linklocalenabled variables # when selecting "DHCP or Link Local" from MAX, so return it by default to avoid # having the requestmode "None" because none of the conditions above matched. return 'dhcp_linklocal' def _get_adapter_mode_info(interface): ''' return adaptermode for given interface ''' mode = _load_config(interface, ['mode'])['mode'].lower() return mode if mode in ['disabled', 'ethercat'] else 'tcpip' def _get_possible_adapter_modes(interface, blacklist): ''' Return possible adapter modes for a given interface using a blacklist. 
:param interface: interface name :param blacklist: given blacklist :return: list of possible adapter modes ''' adapter_modes = [] protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower() sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface)) with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file: uevent_lines = uevent_file.readlines() uevent_devtype = "" for line in uevent_lines: if line.startswith("DEVTYPE="): uevent_devtype = line.split('=')[1].strip() break for adapter_mode in blacklist: if adapter_mode == '_': continue value = blacklist.get(adapter_mode, {}) if value.get('additional_protocol') and adapter_mode not in protocols: continue if interface not in value['name'] \ and not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path) or (blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype) for iface_type in value['type']): adapter_modes += [adapter_mode] return adapter_modes def _get_static_info(interface): ''' Return information about an interface from config file. 
:param interface: interface label ''' data = { 'connectionid': interface.name, 'label': interface.name, 'hwaddr': interface.hwaddr[:-1], 'up': False, 'ipv4': { 'supportedrequestmodes': ['static', 'dhcp_linklocal', 'disabled'], 'requestmode': 'static' }, 'wireless': False } hwaddr_section_number = ''.join(data['hwaddr'].split(':')) if os.path.exists(INTERFACES_CONFIG): information = _load_config(hwaddr_section_number, ['IPv4', 'Nameservers'], filename=INTERFACES_CONFIG) if information['IPv4'] != '': ipv4_information = information['IPv4'].split('/') data['ipv4']['address'] = ipv4_information[0] data['ipv4']['dns'] = information['Nameservers'].split(',') data['ipv4']['netmask'] = ipv4_information[1] data['ipv4']['gateway'] = ipv4_information[2] return data def _get_base_interface_info(interface): ''' return base details about given interface ''' blacklist = { 'tcpip': { 'name': [], 'type': [], 'additional_protocol': False }, 'disabled': { 'name': ['eth0'], 'type': ['gadget'], 'additional_protocol': False }, 'ethercat': { 'name': ['eth0'], 'type': ['gadget', 'usb', 'wlan'], 'additional_protocol': True }, '_': { 'usb': 'sys', 'gadget': 'uevent', 'wlan': 'uevent' } } return { 'label': interface.name, 'connectionid': interface.name, 'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist), 'adapter_mode': _get_adapter_mode_info(interface.name), 'up': interface.flags & IFF_RUNNING != 0, 'ipv4': { 'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'], 'requestmode': _get_request_mode_info(interface.name) }, 'hwaddr': interface.hwaddr[:-1] } def _get_ethercat_interface_info(interface): ''' return details about given ethercat interface ''' base_information = _get_base_interface_info(interface) base_information['ethercat'] = { 'masterid': _load_config(interface.name, ['MasterID'])['MasterID'] } return base_information def _get_tcpip_interface_info(interface): ''' return details about given tcpip interface ''' 
base_information = _get_base_interface_info(interface) if base_information['ipv4']['requestmode'] == 'static': settings = _load_config(interface.name, ['IP_Address', 'Subnet_Mask', 'Gateway', 'DNS_Address']) base_information['ipv4']['address'] = settings['IP_Address'] base_information['ipv4']['netmask'] = settings['Subnet_Mask'] base_information['ipv4']['gateway'] = settings['Gateway'] base_information['ipv4']['dns'] = [settings['DNS_Address']] elif base_information['up']: base_information['ipv4']['address'] = interface.sockaddrToStr(interface.addr) base_information['ipv4']['netmask'] = interface.sockaddrToStr(interface.netmask) base_information['ipv4']['gateway'] = '0.0.0.0' base_information['ipv4']['dns'] = _get_dns_info() with salt.utils.files.fopen('/proc/net/route', 'r') as route_file: pattern = re.compile(r'^{interface}\t[0]{{8}}\t([0-9A-Z]{{8}})'.format(interface=interface.name), re.MULTILINE) match = pattern.search(route_file.read()) iface_gateway_hex = None if not match else match.group(1) if iface_gateway_hex is not None and len(iface_gateway_hex) == 8: base_information['ipv4']['gateway'] = '.'.join([str(int(iface_gateway_hex[i:i + 2], 16)) for i in range(6, -1, -2)]) return base_information def _get_interface_info(interface): ''' return details about given interface ''' adapter_mode = _get_adapter_mode_info(interface.name) if adapter_mode == 'disabled': return _get_base_interface_info(interface) elif adapter_mode == 'ethercat': return _get_ethercat_interface_info(interface) return _get_tcpip_interface_info(interface) def _dict_to_string(dictionary): ''' converts a dictionary object into a list of strings ''' ret = '' for key, val in sorted(dictionary.items()): if isinstance(val, dict): for line in _dict_to_string(val): ret += six.text_type(key) + '-' + line + '\n' elif isinstance(val, list): text = ' '.join([six.text_type(item) for item in val]) ret += six.text_type(key) + ': ' + text + '\n' else: ret += six.text_type(key) + ': ' + six.text_type(val) + 
'\n' return ret.splitlines() def _get_info(interface): ''' Return information about an interface if it's associated with a service. :param interface: interface label ''' service = _interface_to_service(interface.name) return _get_service_info(service) def get_interfaces_details(): ''' Get details about all the interfaces on the minion :return: information about all interfaces omitting loopback :rtype: dictionary CLI Example: .. code-block:: bash salt '*' ip.get_interfaces_details ''' _interfaces = [interface for interface in pyiface.getIfaces() if interface.flags & IFF_LOOPBACK == 0] if __grains__['lsb_distrib_id'] == 'nilrt': return {'interfaces': list(map(_get_interface_info, _interfaces))} # filter just the services _interfaces = [interface for interface in _interfaces if _interface_to_service(interface.name) is not None] return {'interfaces': list(map(_get_info, _interfaces))} def _change_state_legacy(interface, new_state): ''' Enable or disable an interface on a legacy distro Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP' if new_state == 'up' else 'Disabled') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: out = __salt__['cmd.run_all']('ip link set {0} {1}'.format(interface, new_state)) if out['retcode'] != 0: msg = 'Couldn\'t {0} interface {1}. Error: {2}'.format('enable' if new_state == 'up' else 'disable', interface, out['stderr']) raise salt.exceptions.CommandExecutionError(msg) return True def _change_state(interface, new_state): ''' Enable or disable an interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. 
:param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' if __grains__['lsb_distrib_id'] == 'nilrt': return _change_state_legacy(interface, new_state) service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) connected = _connected(service) if (not connected and new_state == 'up') or (connected and new_state == 'down'): service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) try: state = service.connect() if new_state == 'up' else service.disconnect() return state is None except Exception: raise salt.exceptions.CommandExecutionError('Couldn\'t {0} service: {1}\n' .format('enable' if new_state == 'up' else 'disable', service)) return True def up(interface, iface_type=None): # pylint: disable=invalid-name,unused-argument ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.up interface-label ''' return _change_state(interface, 'up') def enable(interface): ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.enable interface-label ''' return up(interface) def down(interface, iface_type=None): # pylint: disable=unused-argument ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. 
:param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.down interface-label ''' return _change_state(interface, 'down') def disable(interface): ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.disable interface-label ''' return down(interface) def _save_config(section, token, value): ''' Helper function to persist a configuration in the ini file ''' cmd = NIRTCFG_PATH cmd += ' --set section={0},token=\'{1}\',value=\'{2}\''.format(section, token, value) if __salt__['cmd.run_all'](cmd)['retcode'] != 0: exc_msg = 'Error: could not set {} to {} for {}\n'.format(token, value, section) raise salt.exceptions.CommandExecutionError(exc_msg) def set_ethercat(interface, master_id): ''' Configure specified adapter to use EtherCAT adapter mode. If successful, the target will need reboot if it doesn't already use EtherCAT adapter mode, otherwise will return true. :param interface: interface label :param master_id: EtherCAT Master ID :return: True if the settings were applied, otherwise an exception will be thrown. CLI Example: .. 
code-block:: bash salt '*' ip.set_ethercat interface-label master-id ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', NIRTCFG_ETHERCAT) _save_config(interface, 'MasterID', master_id) if initial_mode != 'ethercat': __salt__['system.set_reboot_required_witnessed']() return True raise salt.exceptions.CommandExecutionError('EtherCAT is not supported') def _restart(interface): ''' Disable and enable an interface ''' disable(interface) enable(interface) def set_dhcp_linklocal_all(interface): ''' Configure specified adapter to use DHCP with linklocal fallback Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_dhcp_linklocal_all interface-label ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('dhcp', variant_level=1) ipv4['Address'] = dbus.String('', variant_level=1) ipv4['Netmask'] = dbus.String('', variant_level=1) ipv4['Gateway'] = dbus.String('', variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) service.set_property('Nameservers.Configuration', ['']) # reset nameservers list except Exception as exc: exc_msg = 'Couldn\'t set dhcp linklocal for service: 
{0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def set_dhcp_only_all(interface): ''' Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.dhcp_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '0') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def set_linklocal_only_all(interface): ''' Configure specified adapter to use linklocal only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.linklocal_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def _configure_static_interface(interface, **settings): ''' Configure an interface that is not detected as a service by Connman (i.e. 
link is down) :param interface: interface label :param settings: - ip - netmask - gateway - dns - name :return: True if settings were applied successfully. :rtype: bool ''' interface = pyiface.Interface(name=interface) parser = configparser.ConfigParser() if os.path.exists(INTERFACES_CONFIG): try: with salt.utils.files.fopen(INTERFACES_CONFIG, 'r') as config_file: parser.readfp(config_file) except configparser.MissingSectionHeaderError: pass hwaddr = interface.hwaddr[:-1] hwaddr_section_number = ''.join(hwaddr.split(':')) if not parser.has_section('interface_{0}'.format(hwaddr_section_number)): parser.add_section('interface_{0}'.format(hwaddr_section_number)) ip_address = settings.get('ip', '0.0.0.0') netmask = settings.get('netmask', '0.0.0.0') gateway = settings.get('gateway', '0.0.0.0') dns_servers = settings.get('dns', '') name = settings.get('name', 'ethernet_cable_{0}'.format(hwaddr_section_number)) parser.set('interface_{0}'.format(hwaddr_section_number), 'IPv4', '{0}/{1}/{2}'. format(ip_address, netmask, gateway)) parser.set('interface_{0}'.format(hwaddr_section_number), 'Nameservers', dns_servers) parser.set('interface_{0}'.format(hwaddr_section_number), 'Name', name) parser.set('interface_{0}'.format(hwaddr_section_number), 'MAC', hwaddr) parser.set('interface_{0}'.format(hwaddr_section_number), 'Type', 'ethernet') with salt.utils.files.fopen(INTERFACES_CONFIG, 'w') as config_file: parser.write(config_file) return True def get_interface(iface): ''' Returns details about given interface. CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0 ''' _interfaces = get_interfaces_details() for _interface in _interfaces['interfaces']: if _interface['connectionid'] == iface: return _dict_to_string(_interface) return None def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if iface_type != 'eth': raise salt.exceptions.CommandExecutionError('Interface type not supported: {0}:'.format(iface_type)) if 'proto' not in settings or settings['proto'] == 'dhcp': # default protocol type used is dhcp set_dhcp_linklocal_all(iface) elif settings['proto'] != 'static': exc_msg = 'Protocol type: {0} is not supported'.format(settings['proto']) raise salt.exceptions.CommandExecutionError(exc_msg) else: address = settings['ipaddr'] netmask = settings['netmask'] gateway = settings['gateway'] dns = [] for key, val in six.iteritems(settings): if 'dns' in key or 'domain' in key: dns += val set_static_all(iface, address, netmask, gateway, dns) if enabled: up(iface) return get_interface(iface) def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. code-block:: bash salt '*' ip.build_network_settings <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') changes = [] if 'networking' in settings: if settings['networking'] in _CONFIG_TRUE: __salt__['service.enable']('connman') else: __salt__['service.disable']('connman') if 'hostname' in settings: new_hostname = settings['hostname'].split('.', 1)[0] settings['hostname'] = new_hostname old_hostname = __salt__['network.get_hostname'] if new_hostname != old_hostname: __salt__['network.mod_hostname'](new_hostname) changes.append('hostname={0}'.format(new_hostname)) return changes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.get_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') settings = [] networking = 'no' if _get_state() == 'offline' else 'yes' settings.append('networking={0}'.format(networking)) hostname = __salt__['network.get_hostname'] settings.append('hostname={0}'.format(hostname)) return settings def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' ) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: stop = __salt__['service.stop']('connman') time.sleep(2) res = stop and __salt__['service.start']('connman') return hostname_res and res
saltstack/salt
salt/modules/nilrt_ip.py
get_interface
python
def get_interface(iface): ''' Returns details about given interface. CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0 ''' _interfaces = get_interfaces_details() for _interface in _interfaces['interfaces']: if _interface['connectionid'] == iface: return _dict_to_string(_interface) return None
Returns details about given interface. CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nilrt_ip.py#L898-L912
[ "def _dict_to_string(dictionary):\n '''\n converts a dictionary object into a list of strings\n '''\n ret = ''\n for key, val in sorted(dictionary.items()):\n if isinstance(val, dict):\n for line in _dict_to_string(val):\n ret += six.text_type(key) + '-' + line + '\\n'\n elif isinstance(val, list):\n text = ' '.join([six.text_type(item) for item in val])\n ret += six.text_type(key) + ': ' + text + '\\n'\n else:\n ret += six.text_type(key) + ': ' + six.text_type(val) + '\\n'\n return ret.splitlines()\n", "def get_interfaces_details():\n '''\n Get details about all the interfaces on the minion\n\n :return: information about all interfaces omitting loopback\n :rtype: dictionary\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' ip.get_interfaces_details\n '''\n _interfaces = [interface for interface in pyiface.getIfaces() if interface.flags & IFF_LOOPBACK == 0]\n if __grains__['lsb_distrib_id'] == 'nilrt':\n return {'interfaces': list(map(_get_interface_info, _interfaces))}\n # filter just the services\n _interfaces = [interface for interface in _interfaces if _interface_to_service(interface.name) is not None]\n return {'interfaces': list(map(_get_info, _interfaces))}\n" ]
# -*- coding: utf-8 -*- ''' The networking module for NI Linux Real-Time distro ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time import os import re # Import salt libs import salt.exceptions import salt.utils.files import salt.utils.validate.net # Import 3rd-party libs # pylint: disable=import-error,redefined-builtin,no-name-in-module from salt.ext.six.moves import map, range, configparser from salt.ext import six # pylint: enable=import-error,redefined-builtin,no-name-in-module try: import pyconnman except ImportError: pyconnman = None try: import dbus except ImportError: dbus = None try: import pyiface from pyiface.ifreqioctls import IFF_LOOPBACK, IFF_RUNNING except ImportError: pyiface = None try: from requests.structures import CaseInsensitiveDict except ImportError: CaseInsensitiveDict = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'ip' SERVICE_PATH = '/net/connman/service/' INTERFACES_CONFIG = '/var/lib/connman/interfaces.config' NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg' INI_FILE = '/etc/natinst/share/ni-rt.ini' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] NIRTCFG_ETHERCAT = 'EtherCAT' def _assume_condition(condition, err): ''' Raise an exception if the condition is false ''' if not condition: raise RuntimeError(err) def __virtual__(): ''' Confine this module to NI Linux Real-Time based distros ''' try: msg = 'The nilrt_ip module could not be loaded: unsupported OS family' _assume_condition(__grains__['os_family'] == 'NILinuxRT', msg) _assume_condition(CaseInsensitiveDict, 'The python package request is not installed') _assume_condition(pyiface, 'The python pyiface package is not installed') if __grains__['lsb_distrib_id'] != 'nilrt': _assume_condition(pyconnman, 'The python package pyconnman is not installed') _assume_condition(dbus, 'The python DBus package is not installed') _assume_condition(_get_state() != 'offline', 
'Connman is not running') except RuntimeError as exc: return False, str(exc) return __virtualname__ def _get_state(): ''' Returns the state of connman ''' try: return pyconnman.ConnManager().get_property('State') except KeyError: return 'offline' except dbus.DBusException as exc: raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc)) def _get_technologies(): ''' Returns the technologies of connman ''' tech = '' technologies = pyconnman.ConnManager().get_technologies() for path, params in technologies: tech += '{0}\n\tName = {1}\n\tType = {2}\n\tPowered = {3}\n\tConnected = {4}\n'.format( path, params['Name'], params['Type'], params['Powered'] == 1, params['Connected'] == 1) return tech def _get_services(): ''' Returns a list with all connman services ''' serv = [] services = pyconnman.ConnManager().get_services() for path, _ in services: serv.append(six.text_type(path[len(SERVICE_PATH):])) return serv def _connected(service): ''' Verify if a connman service is connected ''' state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State') return state == 'online' or state == 'ready' def _space_delimited_list(value): ''' validate that a value contains one or more space-delimited values ''' if isinstance(value, six.string_types): items = value.split(' ') valid = items and all(items) else: valid = hasattr(value, '__iter__') and (value != []) if valid: return True, 'space-delimited string' return False, '{0} is not a valid list.\n'.format(value) def _validate_ipv4(value): ''' validate ipv4 values ''' if len(value) == 3: if not salt.utils.validate.net.ipv4_addr(value[0].strip()): return False, 'Invalid ip address: {0} for ipv4 option'.format(value[0]) if not salt.utils.validate.net.netmask(value[1].strip()): return False, 'Invalid netmask: {0} for ipv4 option'.format(value[1]) if not salt.utils.validate.net.ipv4_addr(value[2].strip()): return False, 'Invalid gateway: {0} for ipv4 option'.format(value[2]) else: return 
False, 'Invalid value: {0} for ipv4 option'.format(value) return True, '' def _interface_to_service(iface): ''' returns the coresponding service to given interface if exists, otherwise return None ''' for _service in _get_services(): service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service)) if service_info.get_property('Ethernet')['Interface'] == iface: return _service return None def _get_service_info(service): ''' return details about given connman service ''' service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) data = { 'label': service, 'wireless': service_info.get_property('Type') == 'wifi', 'connectionid': six.text_type(service_info.get_property('Ethernet')['Interface']), 'hwaddr': six.text_type(service_info.get_property('Ethernet')['Address']) } state = service_info.get_property('State') if state == 'ready' or state == 'online': data['up'] = True data['ipv4'] = { 'gateway': '0.0.0.0' } ipv4 = 'IPv4' if service_info.get_property('IPv4')['Method'] == 'manual': ipv4 += '.Configuration' ipv4_info = service_info.get_property(ipv4) for info in ['Method', 'Address', 'Netmask', 'Gateway']: value = ipv4_info.get(info) if value is None: log.warning('Unable to get IPv4 %s for service %s\n', info, service) continue if info == 'Method': info = 'requestmode' if value == 'dhcp': value = 'dhcp_linklocal' elif value in ('manual', 'fixed'): value = 'static' data['ipv4'][info.lower()] = six.text_type(value) ipv6_info = service_info.get_property('IPv6') for info in ['Address', 'Prefix', 'Gateway']: value = ipv6_info.get(info) if value is None: log.warning('Unable to get IPv6 %s for service %s\n', info, service) continue if 'ipv6' not in data: data['ipv6'] = {} data['ipv6'][info.lower()] = [six.text_type(value)] nameservers = [] for nameserver_prop in service_info.get_property('Nameservers'): nameservers.append(six.text_type(nameserver_prop)) data['ipv4']['dns'] = nameservers else: data['up'] = False data['ipv4'] = { 'requestmode': 
'disabled' } data['ipv4']['supportedrequestmodes'] = [ 'static', 'dhcp_linklocal', 'disabled' ] return data def _get_dns_info(): ''' return dns list ''' dns_list = [] try: with salt.utils.files.fopen('/etc/resolv.conf', 'r+') as dns_info: lines = dns_info.readlines() for line in lines: if 'nameserver' in line: dns = line.split()[1].strip() if dns not in dns_list: dns_list.append(dns) except IOError: log.warning('Could not get domain\n') return dns_list def _remove_quotes(value): ''' Remove leading and trailing double quotes if they exist. ''' # nirtcfg writes values with quotes if len(value) > 1 and value[0] == value[-1] == '\"': value = value[1:-1] return value def _load_config(section, options, default_value='', filename=INI_FILE): ''' Get values for some options and a given section from a config file. :param section: Section Name :param options: List of options :param default_value: Default value if an option doesn't have a value. Default is empty string. :param filename: config file. Default is INI_FILE. 
:return: ''' results = {} if not options: return results with salt.utils.files.fopen(filename, 'r') as config_file: config_parser = configparser.RawConfigParser(dict_type=CaseInsensitiveDict) config_parser.readfp(config_file) for option in options: results[option] = _remove_quotes(config_parser.get(section, option)) \ if config_parser.has_option(section, option) else default_value return results def _get_request_mode_info(interface): ''' return requestmode for given interface ''' settings = _load_config(interface, ['linklocalenabled', 'dhcpenabled'], -1) link_local_enabled = int(settings['linklocalenabled']) dhcp_enabled = int(settings['dhcpenabled']) if dhcp_enabled == 1: return 'dhcp_linklocal' if link_local_enabled == 1 else 'dhcp_only' else: if link_local_enabled == 1: return 'linklocal_only' if link_local_enabled == 0: return 'static' # some versions of nirtcfg don't set the dhcpenabled/linklocalenabled variables # when selecting "DHCP or Link Local" from MAX, so return it by default to avoid # having the requestmode "None" because none of the conditions above matched. return 'dhcp_linklocal' def _get_adapter_mode_info(interface): ''' return adaptermode for given interface ''' mode = _load_config(interface, ['mode'])['mode'].lower() return mode if mode in ['disabled', 'ethercat'] else 'tcpip' def _get_possible_adapter_modes(interface, blacklist): ''' Return possible adapter modes for a given interface using a blacklist. 
:param interface: interface name :param blacklist: given blacklist :return: list of possible adapter modes ''' adapter_modes = [] protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower() sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface)) with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file: uevent_lines = uevent_file.readlines() uevent_devtype = "" for line in uevent_lines: if line.startswith("DEVTYPE="): uevent_devtype = line.split('=')[1].strip() break for adapter_mode in blacklist: if adapter_mode == '_': continue value = blacklist.get(adapter_mode, {}) if value.get('additional_protocol') and adapter_mode not in protocols: continue if interface not in value['name'] \ and not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path) or (blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype) for iface_type in value['type']): adapter_modes += [adapter_mode] return adapter_modes def _get_static_info(interface): ''' Return information about an interface from config file. 
:param interface: interface label ''' data = { 'connectionid': interface.name, 'label': interface.name, 'hwaddr': interface.hwaddr[:-1], 'up': False, 'ipv4': { 'supportedrequestmodes': ['static', 'dhcp_linklocal', 'disabled'], 'requestmode': 'static' }, 'wireless': False } hwaddr_section_number = ''.join(data['hwaddr'].split(':')) if os.path.exists(INTERFACES_CONFIG): information = _load_config(hwaddr_section_number, ['IPv4', 'Nameservers'], filename=INTERFACES_CONFIG) if information['IPv4'] != '': ipv4_information = information['IPv4'].split('/') data['ipv4']['address'] = ipv4_information[0] data['ipv4']['dns'] = information['Nameservers'].split(',') data['ipv4']['netmask'] = ipv4_information[1] data['ipv4']['gateway'] = ipv4_information[2] return data def _get_base_interface_info(interface): ''' return base details about given interface ''' blacklist = { 'tcpip': { 'name': [], 'type': [], 'additional_protocol': False }, 'disabled': { 'name': ['eth0'], 'type': ['gadget'], 'additional_protocol': False }, 'ethercat': { 'name': ['eth0'], 'type': ['gadget', 'usb', 'wlan'], 'additional_protocol': True }, '_': { 'usb': 'sys', 'gadget': 'uevent', 'wlan': 'uevent' } } return { 'label': interface.name, 'connectionid': interface.name, 'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist), 'adapter_mode': _get_adapter_mode_info(interface.name), 'up': interface.flags & IFF_RUNNING != 0, 'ipv4': { 'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'], 'requestmode': _get_request_mode_info(interface.name) }, 'hwaddr': interface.hwaddr[:-1] } def _get_ethercat_interface_info(interface): ''' return details about given ethercat interface ''' base_information = _get_base_interface_info(interface) base_information['ethercat'] = { 'masterid': _load_config(interface.name, ['MasterID'])['MasterID'] } return base_information def _get_tcpip_interface_info(interface): ''' return details about given tcpip interface ''' 
base_information = _get_base_interface_info(interface) if base_information['ipv4']['requestmode'] == 'static': settings = _load_config(interface.name, ['IP_Address', 'Subnet_Mask', 'Gateway', 'DNS_Address']) base_information['ipv4']['address'] = settings['IP_Address'] base_information['ipv4']['netmask'] = settings['Subnet_Mask'] base_information['ipv4']['gateway'] = settings['Gateway'] base_information['ipv4']['dns'] = [settings['DNS_Address']] elif base_information['up']: base_information['ipv4']['address'] = interface.sockaddrToStr(interface.addr) base_information['ipv4']['netmask'] = interface.sockaddrToStr(interface.netmask) base_information['ipv4']['gateway'] = '0.0.0.0' base_information['ipv4']['dns'] = _get_dns_info() with salt.utils.files.fopen('/proc/net/route', 'r') as route_file: pattern = re.compile(r'^{interface}\t[0]{{8}}\t([0-9A-Z]{{8}})'.format(interface=interface.name), re.MULTILINE) match = pattern.search(route_file.read()) iface_gateway_hex = None if not match else match.group(1) if iface_gateway_hex is not None and len(iface_gateway_hex) == 8: base_information['ipv4']['gateway'] = '.'.join([str(int(iface_gateway_hex[i:i + 2], 16)) for i in range(6, -1, -2)]) return base_information def _get_interface_info(interface): ''' return details about given interface ''' adapter_mode = _get_adapter_mode_info(interface.name) if adapter_mode == 'disabled': return _get_base_interface_info(interface) elif adapter_mode == 'ethercat': return _get_ethercat_interface_info(interface) return _get_tcpip_interface_info(interface) def _dict_to_string(dictionary): ''' converts a dictionary object into a list of strings ''' ret = '' for key, val in sorted(dictionary.items()): if isinstance(val, dict): for line in _dict_to_string(val): ret += six.text_type(key) + '-' + line + '\n' elif isinstance(val, list): text = ' '.join([six.text_type(item) for item in val]) ret += six.text_type(key) + ': ' + text + '\n' else: ret += six.text_type(key) + ': ' + six.text_type(val) + 
'\n' return ret.splitlines() def _get_info(interface): ''' Return information about an interface if it's associated with a service. :param interface: interface label ''' service = _interface_to_service(interface.name) return _get_service_info(service) def get_interfaces_details(): ''' Get details about all the interfaces on the minion :return: information about all interfaces omitting loopback :rtype: dictionary CLI Example: .. code-block:: bash salt '*' ip.get_interfaces_details ''' _interfaces = [interface for interface in pyiface.getIfaces() if interface.flags & IFF_LOOPBACK == 0] if __grains__['lsb_distrib_id'] == 'nilrt': return {'interfaces': list(map(_get_interface_info, _interfaces))} # filter just the services _interfaces = [interface for interface in _interfaces if _interface_to_service(interface.name) is not None] return {'interfaces': list(map(_get_info, _interfaces))} def _change_state_legacy(interface, new_state): ''' Enable or disable an interface on a legacy distro Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP' if new_state == 'up' else 'Disabled') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: out = __salt__['cmd.run_all']('ip link set {0} {1}'.format(interface, new_state)) if out['retcode'] != 0: msg = 'Couldn\'t {0} interface {1}. Error: {2}'.format('enable' if new_state == 'up' else 'disable', interface, out['stderr']) raise salt.exceptions.CommandExecutionError(msg) return True def _change_state(interface, new_state): ''' Enable or disable an interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. 
:param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' if __grains__['lsb_distrib_id'] == 'nilrt': return _change_state_legacy(interface, new_state) service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) connected = _connected(service) if (not connected and new_state == 'up') or (connected and new_state == 'down'): service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) try: state = service.connect() if new_state == 'up' else service.disconnect() return state is None except Exception: raise salt.exceptions.CommandExecutionError('Couldn\'t {0} service: {1}\n' .format('enable' if new_state == 'up' else 'disable', service)) return True def up(interface, iface_type=None): # pylint: disable=invalid-name,unused-argument ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.up interface-label ''' return _change_state(interface, 'up') def enable(interface): ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.enable interface-label ''' return up(interface) def down(interface, iface_type=None): # pylint: disable=unused-argument ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. 
:param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.down interface-label ''' return _change_state(interface, 'down') def disable(interface): ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.disable interface-label ''' return down(interface) def _save_config(section, token, value): ''' Helper function to persist a configuration in the ini file ''' cmd = NIRTCFG_PATH cmd += ' --set section={0},token=\'{1}\',value=\'{2}\''.format(section, token, value) if __salt__['cmd.run_all'](cmd)['retcode'] != 0: exc_msg = 'Error: could not set {} to {} for {}\n'.format(token, value, section) raise salt.exceptions.CommandExecutionError(exc_msg) def set_ethercat(interface, master_id): ''' Configure specified adapter to use EtherCAT adapter mode. If successful, the target will need reboot if it doesn't already use EtherCAT adapter mode, otherwise will return true. :param interface: interface label :param master_id: EtherCAT Master ID :return: True if the settings were applied, otherwise an exception will be thrown. CLI Example: .. 
code-block:: bash salt '*' ip.set_ethercat interface-label master-id ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', NIRTCFG_ETHERCAT) _save_config(interface, 'MasterID', master_id) if initial_mode != 'ethercat': __salt__['system.set_reboot_required_witnessed']() return True raise salt.exceptions.CommandExecutionError('EtherCAT is not supported') def _restart(interface): ''' Disable and enable an interface ''' disable(interface) enable(interface) def set_dhcp_linklocal_all(interface): ''' Configure specified adapter to use DHCP with linklocal fallback Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_dhcp_linklocal_all interface-label ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('dhcp', variant_level=1) ipv4['Address'] = dbus.String('', variant_level=1) ipv4['Netmask'] = dbus.String('', variant_level=1) ipv4['Gateway'] = dbus.String('', variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) service.set_property('Nameservers.Configuration', ['']) # reset nameservers list except Exception as exc: exc_msg = 'Couldn\'t set dhcp linklocal for service: 
{0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def set_dhcp_only_all(interface): ''' Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.dhcp_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '0') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def set_linklocal_only_all(interface): ''' Configure specified adapter to use linklocal only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.linklocal_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def _configure_static_interface(interface, **settings): ''' Configure an interface that is not detected as a service by Connman (i.e. 
link is down) :param interface: interface label :param settings: - ip - netmask - gateway - dns - name :return: True if settings were applied successfully. :rtype: bool ''' interface = pyiface.Interface(name=interface) parser = configparser.ConfigParser() if os.path.exists(INTERFACES_CONFIG): try: with salt.utils.files.fopen(INTERFACES_CONFIG, 'r') as config_file: parser.readfp(config_file) except configparser.MissingSectionHeaderError: pass hwaddr = interface.hwaddr[:-1] hwaddr_section_number = ''.join(hwaddr.split(':')) if not parser.has_section('interface_{0}'.format(hwaddr_section_number)): parser.add_section('interface_{0}'.format(hwaddr_section_number)) ip_address = settings.get('ip', '0.0.0.0') netmask = settings.get('netmask', '0.0.0.0') gateway = settings.get('gateway', '0.0.0.0') dns_servers = settings.get('dns', '') name = settings.get('name', 'ethernet_cable_{0}'.format(hwaddr_section_number)) parser.set('interface_{0}'.format(hwaddr_section_number), 'IPv4', '{0}/{1}/{2}'. format(ip_address, netmask, gateway)) parser.set('interface_{0}'.format(hwaddr_section_number), 'Nameservers', dns_servers) parser.set('interface_{0}'.format(hwaddr_section_number), 'Name', name) parser.set('interface_{0}'.format(hwaddr_section_number), 'MAC', hwaddr) parser.set('interface_{0}'.format(hwaddr_section_number), 'Type', 'ethernet') with salt.utils.files.fopen(INTERFACES_CONFIG, 'w') as config_file: parser.write(config_file) return True def set_static_all(interface, address, netmask, gateway, nameservers=None): ''' Configure specified adapter to use ipv4 manual settings Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :param str address: ipv4 address :param str netmask: ipv4 netmask :param str gateway: ipv4 gateway :param str nameservers: list of nameservers servers separated by spaces (Optional) :return: True if the settings were applied, otherwise an exception will be thrown. 
:rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_static_all interface-label address netmask gateway nameservers ''' validate, msg = _validate_ipv4([address, netmask, gateway]) if not validate: raise salt.exceptions.CommandExecutionError(msg) if nameservers: validate, msg = _space_delimited_list(nameservers) if not validate: raise salt.exceptions.CommandExecutionError(msg) if not isinstance(nameservers, list): nameservers = nameservers.split(' ') if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '0') _save_config(interface, 'IP_Address', address) _save_config(interface, 'Subnet_Mask', netmask) _save_config(interface, 'Gateway', gateway) if nameservers: _save_config(interface, 'DNS_Address', nameservers[0]) if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: if interface in pyiface.getIfaces(): return _configure_static_interface(interface, **{'ip': address, 'dns': ','.join(nameservers) if nameservers else '', 'netmask': netmask, 'gateway': gateway}) raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('manual', variant_level=1) ipv4['Address'] = dbus.String('{0}'.format(address), variant_level=1) ipv4['Netmask'] = dbus.String('{0}'.format(netmask), variant_level=1) ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) if nameservers: service.set_property('Nameservers.Configuration', [dbus.String('{0}'.format(d)) for d in nameservers]) except Exception as exc: exc_msg = 'Couldn\'t set manual settings for 
service: {0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if iface_type != 'eth': raise salt.exceptions.CommandExecutionError('Interface type not supported: {0}:'.format(iface_type)) if 'proto' not in settings or settings['proto'] == 'dhcp': # default protocol type used is dhcp set_dhcp_linklocal_all(iface) elif settings['proto'] != 'static': exc_msg = 'Protocol type: {0} is not supported'.format(settings['proto']) raise salt.exceptions.CommandExecutionError(exc_msg) else: address = settings['ipaddr'] netmask = settings['netmask'] gateway = settings['gateway'] dns = [] for key, val in six.iteritems(settings): if 'dns' in key or 'domain' in key: dns += val set_static_all(iface, address, netmask, gateway, dns) if enabled: up(iface) return get_interface(iface) def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. code-block:: bash salt '*' ip.build_network_settings <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') changes = [] if 'networking' in settings: if settings['networking'] in _CONFIG_TRUE: __salt__['service.enable']('connman') else: __salt__['service.disable']('connman') if 'hostname' in settings: new_hostname = settings['hostname'].split('.', 1)[0] settings['hostname'] = new_hostname old_hostname = __salt__['network.get_hostname'] if new_hostname != old_hostname: __salt__['network.mod_hostname'](new_hostname) changes.append('hostname={0}'.format(new_hostname)) return changes def get_network_settings(): ''' Return the contents of the global network script. 
CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') settings = [] networking = 'no' if _get_state() == 'offline' else 'yes' settings.append('networking={0}'.format(networking)) hostname = __salt__['network.get_hostname'] settings.append('hostname={0}'.format(hostname)) return settings def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' ) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: stop = __salt__['service.stop']('connman') time.sleep(2) res = stop and __salt__['service.start']('connman') return hostname_res and res
saltstack/salt
salt/modules/nilrt_ip.py
build_interface
python
def build_interface(iface, iface_type, enabled, **settings):
    '''
    Build an interface script for a network interface.

    :param str iface: interface label (e.g. ``eth0``)
    :param str iface_type: interface type; only ``eth`` is supported
    :param bool enabled: when True, bring the interface up after configuring it
    :param settings: interface settings. ``proto`` selects ``dhcp`` (the
        default when absent) or ``static``.  Static mode reads ``ipaddr``,
        ``netmask`` and ``gateway``, and collects the values of any keys
        containing ``dns`` or ``domain`` into the nameserver list.
    :return: details about the configured interface (see ``get_interface``)
    :raises CommandExecutionError: on legacy 'nilrt' distros, for an
        unsupported ``iface_type``, or for an unsupported ``proto`` value

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_interface eth0 eth <settings>
    '''
    if __grains__['lsb_distrib_id'] == 'nilrt':
        raise salt.exceptions.CommandExecutionError('Not supported in this version.')
    if iface_type != 'eth':
        raise salt.exceptions.CommandExecutionError('Interface type not supported: {0}:'.format(iface_type))

    if 'proto' not in settings or settings['proto'] == 'dhcp':
        # default protocol type used is dhcp
        set_dhcp_linklocal_all(iface)
    elif settings['proto'] != 'static':
        exc_msg = 'Protocol type: {0} is not supported'.format(settings['proto'])
        raise salt.exceptions.CommandExecutionError(exc_msg)
    else:
        address = settings['ipaddr']
        netmask = settings['netmask']
        gateway = settings['gateway']
        dns = []
        for key, val in six.iteritems(settings):
            if 'dns' in key or 'domain' in key:
                # BUGFIX: '+=' extends element-wise, so a bare string value
                # used to be split into single characters; append strings
                # whole, extend with list values as before.
                if isinstance(val, six.string_types):
                    dns.append(val)
                else:
                    dns += val
        set_static_all(iface, address, netmask, gateway, dns)

    if enabled:
        up(iface)
    return get_interface(iface)
Build an interface script for a network interface. CLI Example: .. code-block:: bash salt '*' ip.build_interface eth0 eth <settings>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nilrt_ip.py#L915-L948
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def up(interface, iface_type=None): # pylint: disable=invalid-name,unused-argument\n '''\n Enable the specified interface\n\n Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot.\n\n :param str interface: interface label\n :return: True if the service was enabled, otherwise an exception will be thrown.\n :rtype: bool\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' ip.up interface-label\n '''\n return _change_state(interface, 'up')\n", "def get_interface(iface):\n '''\n Returns details about given interface.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' ip.get_interface eth0\n '''\n _interfaces = get_interfaces_details()\n for _interface in _interfaces['interfaces']:\n if _interface['connectionid'] == iface:\n return _dict_to_string(_interface)\n return None\n", "def set_dhcp_linklocal_all(interface):\n '''\n Configure specified adapter to use DHCP with linklocal fallback\n\n Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot.\n\n :param str interface: interface label\n :return: True if the settings were applied, otherwise an exception will be thrown.\n :rtype: bool\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' ip.set_dhcp_linklocal_all interface-label\n '''\n if __grains__['lsb_distrib_id'] == 'nilrt':\n initial_mode = _get_adapter_mode_info(interface)\n _save_config(interface, 'Mode', 'TCPIP')\n _save_config(interface, 'dhcpenabled', '1')\n _save_config(interface, 'linklocalenabled', '1')\n if initial_mode == 'ethercat':\n __salt__['system.set_reboot_required_witnessed']()\n else:\n _restart(interface)\n return True\n service = _interface_to_service(interface)\n if not service:\n raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface))\n service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service))\n ipv4 = service.get_property('IPv4.Configuration')\n ipv4['Method'] = dbus.String('dhcp', variant_level=1)\n ipv4['Address'] = dbus.String('', variant_level=1)\n ipv4['Netmask'] = dbus.String('', variant_level=1)\n ipv4['Gateway'] = dbus.String('', variant_level=1)\n try:\n service.set_property('IPv4.Configuration', ipv4)\n service.set_property('Nameservers.Configuration', ['']) # reset nameservers list\n except Exception as exc:\n exc_msg = 'Couldn\\'t set dhcp linklocal for service: {0}\\nError: {1}\\n'.format(service, exc)\n raise salt.exceptions.CommandExecutionError(exc_msg)\n return True\n", "def set_static_all(interface, address, netmask, gateway, nameservers=None):\n '''\n Configure specified adapter to use ipv4 manual settings\n\n Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot.\n\n :param str interface: interface label\n :param str address: ipv4 address\n :param str netmask: ipv4 netmask\n :param str gateway: ipv4 gateway\n :param str nameservers: list of nameservers servers separated by spaces (Optional)\n :return: True if the settings were applied, otherwise an exception will be thrown.\n :rtype: bool\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' ip.set_static_all interface-label address netmask gateway nameservers\n '''\n validate, msg = _validate_ipv4([address, netmask, gateway])\n if not validate:\n raise salt.exceptions.CommandExecutionError(msg)\n if nameservers:\n validate, msg = _space_delimited_list(nameservers)\n if not validate:\n raise salt.exceptions.CommandExecutionError(msg)\n if not isinstance(nameservers, list):\n nameservers = nameservers.split(' ')\n if __grains__['lsb_distrib_id'] == 'nilrt':\n initial_mode = _get_adapter_mode_info(interface)\n _save_config(interface, 'Mode', 'TCPIP')\n _save_config(interface, 'dhcpenabled', '0')\n _save_config(interface, 'linklocalenabled', '0')\n _save_config(interface, 'IP_Address', address)\n _save_config(interface, 'Subnet_Mask', netmask)\n _save_config(interface, 'Gateway', gateway)\n if nameservers:\n _save_config(interface, 'DNS_Address', nameservers[0])\n if initial_mode == 'ethercat':\n __salt__['system.set_reboot_required_witnessed']()\n else:\n _restart(interface)\n return True\n service = _interface_to_service(interface)\n if not service:\n if interface in pyiface.getIfaces():\n return _configure_static_interface(interface, **{'ip': address,\n 'dns': ','.join(nameservers) if nameservers else '',\n 'netmask': netmask, 'gateway': gateway})\n raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface))\n service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service))\n ipv4 = service.get_property('IPv4.Configuration')\n ipv4['Method'] = dbus.String('manual', variant_level=1)\n ipv4['Address'] = dbus.String('{0}'.format(address), variant_level=1)\n ipv4['Netmask'] = dbus.String('{0}'.format(netmask), variant_level=1)\n ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1)\n try:\n service.set_property('IPv4.Configuration', ipv4)\n if nameservers:\n service.set_property('Nameservers.Configuration', [dbus.String('{0}'.format(d)) for d in nameservers])\n except 
Exception as exc:\n exc_msg = 'Couldn\\'t set manual settings for service: {0}\\nError: {1}\\n'.format(service, exc)\n raise salt.exceptions.CommandExecutionError(exc_msg)\n return True\n" ]
# -*- coding: utf-8 -*- ''' The networking module for NI Linux Real-Time distro ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time import os import re # Import salt libs import salt.exceptions import salt.utils.files import salt.utils.validate.net # Import 3rd-party libs # pylint: disable=import-error,redefined-builtin,no-name-in-module from salt.ext.six.moves import map, range, configparser from salt.ext import six # pylint: enable=import-error,redefined-builtin,no-name-in-module try: import pyconnman except ImportError: pyconnman = None try: import dbus except ImportError: dbus = None try: import pyiface from pyiface.ifreqioctls import IFF_LOOPBACK, IFF_RUNNING except ImportError: pyiface = None try: from requests.structures import CaseInsensitiveDict except ImportError: CaseInsensitiveDict = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'ip' SERVICE_PATH = '/net/connman/service/' INTERFACES_CONFIG = '/var/lib/connman/interfaces.config' NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg' INI_FILE = '/etc/natinst/share/ni-rt.ini' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] NIRTCFG_ETHERCAT = 'EtherCAT' def _assume_condition(condition, err): ''' Raise an exception if the condition is false ''' if not condition: raise RuntimeError(err) def __virtual__(): ''' Confine this module to NI Linux Real-Time based distros ''' try: msg = 'The nilrt_ip module could not be loaded: unsupported OS family' _assume_condition(__grains__['os_family'] == 'NILinuxRT', msg) _assume_condition(CaseInsensitiveDict, 'The python package request is not installed') _assume_condition(pyiface, 'The python pyiface package is not installed') if __grains__['lsb_distrib_id'] != 'nilrt': _assume_condition(pyconnman, 'The python package pyconnman is not installed') _assume_condition(dbus, 'The python DBus package is not installed') _assume_condition(_get_state() != 'offline', 
'Connman is not running') except RuntimeError as exc: return False, str(exc) return __virtualname__ def _get_state(): ''' Returns the state of connman ''' try: return pyconnman.ConnManager().get_property('State') except KeyError: return 'offline' except dbus.DBusException as exc: raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc)) def _get_technologies(): ''' Returns the technologies of connman ''' tech = '' technologies = pyconnman.ConnManager().get_technologies() for path, params in technologies: tech += '{0}\n\tName = {1}\n\tType = {2}\n\tPowered = {3}\n\tConnected = {4}\n'.format( path, params['Name'], params['Type'], params['Powered'] == 1, params['Connected'] == 1) return tech def _get_services(): ''' Returns a list with all connman services ''' serv = [] services = pyconnman.ConnManager().get_services() for path, _ in services: serv.append(six.text_type(path[len(SERVICE_PATH):])) return serv def _connected(service): ''' Verify if a connman service is connected ''' state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State') return state == 'online' or state == 'ready' def _space_delimited_list(value): ''' validate that a value contains one or more space-delimited values ''' if isinstance(value, six.string_types): items = value.split(' ') valid = items and all(items) else: valid = hasattr(value, '__iter__') and (value != []) if valid: return True, 'space-delimited string' return False, '{0} is not a valid list.\n'.format(value) def _validate_ipv4(value): ''' validate ipv4 values ''' if len(value) == 3: if not salt.utils.validate.net.ipv4_addr(value[0].strip()): return False, 'Invalid ip address: {0} for ipv4 option'.format(value[0]) if not salt.utils.validate.net.netmask(value[1].strip()): return False, 'Invalid netmask: {0} for ipv4 option'.format(value[1]) if not salt.utils.validate.net.ipv4_addr(value[2].strip()): return False, 'Invalid gateway: {0} for ipv4 option'.format(value[2]) else: return 
False, 'Invalid value: {0} for ipv4 option'.format(value) return True, '' def _interface_to_service(iface): ''' returns the coresponding service to given interface if exists, otherwise return None ''' for _service in _get_services(): service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service)) if service_info.get_property('Ethernet')['Interface'] == iface: return _service return None def _get_service_info(service): ''' return details about given connman service ''' service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) data = { 'label': service, 'wireless': service_info.get_property('Type') == 'wifi', 'connectionid': six.text_type(service_info.get_property('Ethernet')['Interface']), 'hwaddr': six.text_type(service_info.get_property('Ethernet')['Address']) } state = service_info.get_property('State') if state == 'ready' or state == 'online': data['up'] = True data['ipv4'] = { 'gateway': '0.0.0.0' } ipv4 = 'IPv4' if service_info.get_property('IPv4')['Method'] == 'manual': ipv4 += '.Configuration' ipv4_info = service_info.get_property(ipv4) for info in ['Method', 'Address', 'Netmask', 'Gateway']: value = ipv4_info.get(info) if value is None: log.warning('Unable to get IPv4 %s for service %s\n', info, service) continue if info == 'Method': info = 'requestmode' if value == 'dhcp': value = 'dhcp_linklocal' elif value in ('manual', 'fixed'): value = 'static' data['ipv4'][info.lower()] = six.text_type(value) ipv6_info = service_info.get_property('IPv6') for info in ['Address', 'Prefix', 'Gateway']: value = ipv6_info.get(info) if value is None: log.warning('Unable to get IPv6 %s for service %s\n', info, service) continue if 'ipv6' not in data: data['ipv6'] = {} data['ipv6'][info.lower()] = [six.text_type(value)] nameservers = [] for nameserver_prop in service_info.get_property('Nameservers'): nameservers.append(six.text_type(nameserver_prop)) data['ipv4']['dns'] = nameservers else: data['up'] = False data['ipv4'] = { 'requestmode': 
'disabled' } data['ipv4']['supportedrequestmodes'] = [ 'static', 'dhcp_linklocal', 'disabled' ] return data def _get_dns_info(): ''' return dns list ''' dns_list = [] try: with salt.utils.files.fopen('/etc/resolv.conf', 'r+') as dns_info: lines = dns_info.readlines() for line in lines: if 'nameserver' in line: dns = line.split()[1].strip() if dns not in dns_list: dns_list.append(dns) except IOError: log.warning('Could not get domain\n') return dns_list def _remove_quotes(value): ''' Remove leading and trailing double quotes if they exist. ''' # nirtcfg writes values with quotes if len(value) > 1 and value[0] == value[-1] == '\"': value = value[1:-1] return value def _load_config(section, options, default_value='', filename=INI_FILE): ''' Get values for some options and a given section from a config file. :param section: Section Name :param options: List of options :param default_value: Default value if an option doesn't have a value. Default is empty string. :param filename: config file. Default is INI_FILE. 
:return: ''' results = {} if not options: return results with salt.utils.files.fopen(filename, 'r') as config_file: config_parser = configparser.RawConfigParser(dict_type=CaseInsensitiveDict) config_parser.readfp(config_file) for option in options: results[option] = _remove_quotes(config_parser.get(section, option)) \ if config_parser.has_option(section, option) else default_value return results def _get_request_mode_info(interface): ''' return requestmode for given interface ''' settings = _load_config(interface, ['linklocalenabled', 'dhcpenabled'], -1) link_local_enabled = int(settings['linklocalenabled']) dhcp_enabled = int(settings['dhcpenabled']) if dhcp_enabled == 1: return 'dhcp_linklocal' if link_local_enabled == 1 else 'dhcp_only' else: if link_local_enabled == 1: return 'linklocal_only' if link_local_enabled == 0: return 'static' # some versions of nirtcfg don't set the dhcpenabled/linklocalenabled variables # when selecting "DHCP or Link Local" from MAX, so return it by default to avoid # having the requestmode "None" because none of the conditions above matched. return 'dhcp_linklocal' def _get_adapter_mode_info(interface): ''' return adaptermode for given interface ''' mode = _load_config(interface, ['mode'])['mode'].lower() return mode if mode in ['disabled', 'ethercat'] else 'tcpip' def _get_possible_adapter_modes(interface, blacklist): ''' Return possible adapter modes for a given interface using a blacklist. 
:param interface: interface name :param blacklist: given blacklist :return: list of possible adapter modes ''' adapter_modes = [] protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower() sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface)) with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file: uevent_lines = uevent_file.readlines() uevent_devtype = "" for line in uevent_lines: if line.startswith("DEVTYPE="): uevent_devtype = line.split('=')[1].strip() break for adapter_mode in blacklist: if adapter_mode == '_': continue value = blacklist.get(adapter_mode, {}) if value.get('additional_protocol') and adapter_mode not in protocols: continue if interface not in value['name'] \ and not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path) or (blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype) for iface_type in value['type']): adapter_modes += [adapter_mode] return adapter_modes def _get_static_info(interface): ''' Return information about an interface from config file. 
:param interface: interface label ''' data = { 'connectionid': interface.name, 'label': interface.name, 'hwaddr': interface.hwaddr[:-1], 'up': False, 'ipv4': { 'supportedrequestmodes': ['static', 'dhcp_linklocal', 'disabled'], 'requestmode': 'static' }, 'wireless': False } hwaddr_section_number = ''.join(data['hwaddr'].split(':')) if os.path.exists(INTERFACES_CONFIG): information = _load_config(hwaddr_section_number, ['IPv4', 'Nameservers'], filename=INTERFACES_CONFIG) if information['IPv4'] != '': ipv4_information = information['IPv4'].split('/') data['ipv4']['address'] = ipv4_information[0] data['ipv4']['dns'] = information['Nameservers'].split(',') data['ipv4']['netmask'] = ipv4_information[1] data['ipv4']['gateway'] = ipv4_information[2] return data def _get_base_interface_info(interface): ''' return base details about given interface ''' blacklist = { 'tcpip': { 'name': [], 'type': [], 'additional_protocol': False }, 'disabled': { 'name': ['eth0'], 'type': ['gadget'], 'additional_protocol': False }, 'ethercat': { 'name': ['eth0'], 'type': ['gadget', 'usb', 'wlan'], 'additional_protocol': True }, '_': { 'usb': 'sys', 'gadget': 'uevent', 'wlan': 'uevent' } } return { 'label': interface.name, 'connectionid': interface.name, 'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist), 'adapter_mode': _get_adapter_mode_info(interface.name), 'up': interface.flags & IFF_RUNNING != 0, 'ipv4': { 'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'], 'requestmode': _get_request_mode_info(interface.name) }, 'hwaddr': interface.hwaddr[:-1] } def _get_ethercat_interface_info(interface): ''' return details about given ethercat interface ''' base_information = _get_base_interface_info(interface) base_information['ethercat'] = { 'masterid': _load_config(interface.name, ['MasterID'])['MasterID'] } return base_information def _get_tcpip_interface_info(interface): ''' return details about given tcpip interface ''' 
base_information = _get_base_interface_info(interface) if base_information['ipv4']['requestmode'] == 'static': settings = _load_config(interface.name, ['IP_Address', 'Subnet_Mask', 'Gateway', 'DNS_Address']) base_information['ipv4']['address'] = settings['IP_Address'] base_information['ipv4']['netmask'] = settings['Subnet_Mask'] base_information['ipv4']['gateway'] = settings['Gateway'] base_information['ipv4']['dns'] = [settings['DNS_Address']] elif base_information['up']: base_information['ipv4']['address'] = interface.sockaddrToStr(interface.addr) base_information['ipv4']['netmask'] = interface.sockaddrToStr(interface.netmask) base_information['ipv4']['gateway'] = '0.0.0.0' base_information['ipv4']['dns'] = _get_dns_info() with salt.utils.files.fopen('/proc/net/route', 'r') as route_file: pattern = re.compile(r'^{interface}\t[0]{{8}}\t([0-9A-Z]{{8}})'.format(interface=interface.name), re.MULTILINE) match = pattern.search(route_file.read()) iface_gateway_hex = None if not match else match.group(1) if iface_gateway_hex is not None and len(iface_gateway_hex) == 8: base_information['ipv4']['gateway'] = '.'.join([str(int(iface_gateway_hex[i:i + 2], 16)) for i in range(6, -1, -2)]) return base_information def _get_interface_info(interface): ''' return details about given interface ''' adapter_mode = _get_adapter_mode_info(interface.name) if adapter_mode == 'disabled': return _get_base_interface_info(interface) elif adapter_mode == 'ethercat': return _get_ethercat_interface_info(interface) return _get_tcpip_interface_info(interface) def _dict_to_string(dictionary): ''' converts a dictionary object into a list of strings ''' ret = '' for key, val in sorted(dictionary.items()): if isinstance(val, dict): for line in _dict_to_string(val): ret += six.text_type(key) + '-' + line + '\n' elif isinstance(val, list): text = ' '.join([six.text_type(item) for item in val]) ret += six.text_type(key) + ': ' + text + '\n' else: ret += six.text_type(key) + ': ' + six.text_type(val) + 
'\n' return ret.splitlines() def _get_info(interface): ''' Return information about an interface if it's associated with a service. :param interface: interface label ''' service = _interface_to_service(interface.name) return _get_service_info(service) def get_interfaces_details(): ''' Get details about all the interfaces on the minion :return: information about all interfaces omitting loopback :rtype: dictionary CLI Example: .. code-block:: bash salt '*' ip.get_interfaces_details ''' _interfaces = [interface for interface in pyiface.getIfaces() if interface.flags & IFF_LOOPBACK == 0] if __grains__['lsb_distrib_id'] == 'nilrt': return {'interfaces': list(map(_get_interface_info, _interfaces))} # filter just the services _interfaces = [interface for interface in _interfaces if _interface_to_service(interface.name) is not None] return {'interfaces': list(map(_get_info, _interfaces))} def _change_state_legacy(interface, new_state): ''' Enable or disable an interface on a legacy distro Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP' if new_state == 'up' else 'Disabled') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: out = __salt__['cmd.run_all']('ip link set {0} {1}'.format(interface, new_state)) if out['retcode'] != 0: msg = 'Couldn\'t {0} interface {1}. Error: {2}'.format('enable' if new_state == 'up' else 'disable', interface, out['stderr']) raise salt.exceptions.CommandExecutionError(msg) return True def _change_state(interface, new_state): ''' Enable or disable an interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. 
:param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' if __grains__['lsb_distrib_id'] == 'nilrt': return _change_state_legacy(interface, new_state) service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) connected = _connected(service) if (not connected and new_state == 'up') or (connected and new_state == 'down'): service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) try: state = service.connect() if new_state == 'up' else service.disconnect() return state is None except Exception: raise salt.exceptions.CommandExecutionError('Couldn\'t {0} service: {1}\n' .format('enable' if new_state == 'up' else 'disable', service)) return True def up(interface, iface_type=None): # pylint: disable=invalid-name,unused-argument ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.up interface-label ''' return _change_state(interface, 'up') def enable(interface): ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.enable interface-label ''' return up(interface) def down(interface, iface_type=None): # pylint: disable=unused-argument ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. 
:param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.down interface-label ''' return _change_state(interface, 'down') def disable(interface): ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.disable interface-label ''' return down(interface) def _save_config(section, token, value): ''' Helper function to persist a configuration in the ini file ''' cmd = NIRTCFG_PATH cmd += ' --set section={0},token=\'{1}\',value=\'{2}\''.format(section, token, value) if __salt__['cmd.run_all'](cmd)['retcode'] != 0: exc_msg = 'Error: could not set {} to {} for {}\n'.format(token, value, section) raise salt.exceptions.CommandExecutionError(exc_msg) def set_ethercat(interface, master_id): ''' Configure specified adapter to use EtherCAT adapter mode. If successful, the target will need reboot if it doesn't already use EtherCAT adapter mode, otherwise will return true. :param interface: interface label :param master_id: EtherCAT Master ID :return: True if the settings were applied, otherwise an exception will be thrown. CLI Example: .. 
code-block:: bash salt '*' ip.set_ethercat interface-label master-id ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', NIRTCFG_ETHERCAT) _save_config(interface, 'MasterID', master_id) if initial_mode != 'ethercat': __salt__['system.set_reboot_required_witnessed']() return True raise salt.exceptions.CommandExecutionError('EtherCAT is not supported') def _restart(interface): ''' Disable and enable an interface ''' disable(interface) enable(interface) def set_dhcp_linklocal_all(interface): ''' Configure specified adapter to use DHCP with linklocal fallback Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_dhcp_linklocal_all interface-label ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('dhcp', variant_level=1) ipv4['Address'] = dbus.String('', variant_level=1) ipv4['Netmask'] = dbus.String('', variant_level=1) ipv4['Gateway'] = dbus.String('', variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) service.set_property('Nameservers.Configuration', ['']) # reset nameservers list except Exception as exc: exc_msg = 'Couldn\'t set dhcp linklocal for service: 
{0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def set_dhcp_only_all(interface): ''' Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.dhcp_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '0') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def set_linklocal_only_all(interface): ''' Configure specified adapter to use linklocal only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.linklocal_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def _configure_static_interface(interface, **settings): ''' Configure an interface that is not detected as a service by Connman (i.e. 
link is down) :param interface: interface label :param settings: - ip - netmask - gateway - dns - name :return: True if settings were applied successfully. :rtype: bool ''' interface = pyiface.Interface(name=interface) parser = configparser.ConfigParser() if os.path.exists(INTERFACES_CONFIG): try: with salt.utils.files.fopen(INTERFACES_CONFIG, 'r') as config_file: parser.readfp(config_file) except configparser.MissingSectionHeaderError: pass hwaddr = interface.hwaddr[:-1] hwaddr_section_number = ''.join(hwaddr.split(':')) if not parser.has_section('interface_{0}'.format(hwaddr_section_number)): parser.add_section('interface_{0}'.format(hwaddr_section_number)) ip_address = settings.get('ip', '0.0.0.0') netmask = settings.get('netmask', '0.0.0.0') gateway = settings.get('gateway', '0.0.0.0') dns_servers = settings.get('dns', '') name = settings.get('name', 'ethernet_cable_{0}'.format(hwaddr_section_number)) parser.set('interface_{0}'.format(hwaddr_section_number), 'IPv4', '{0}/{1}/{2}'. format(ip_address, netmask, gateway)) parser.set('interface_{0}'.format(hwaddr_section_number), 'Nameservers', dns_servers) parser.set('interface_{0}'.format(hwaddr_section_number), 'Name', name) parser.set('interface_{0}'.format(hwaddr_section_number), 'MAC', hwaddr) parser.set('interface_{0}'.format(hwaddr_section_number), 'Type', 'ethernet') with salt.utils.files.fopen(INTERFACES_CONFIG, 'w') as config_file: parser.write(config_file) return True def set_static_all(interface, address, netmask, gateway, nameservers=None): ''' Configure specified adapter to use ipv4 manual settings Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :param str address: ipv4 address :param str netmask: ipv4 netmask :param str gateway: ipv4 gateway :param str nameservers: list of nameservers servers separated by spaces (Optional) :return: True if the settings were applied, otherwise an exception will be thrown. 
:rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_static_all interface-label address netmask gateway nameservers ''' validate, msg = _validate_ipv4([address, netmask, gateway]) if not validate: raise salt.exceptions.CommandExecutionError(msg) if nameservers: validate, msg = _space_delimited_list(nameservers) if not validate: raise salt.exceptions.CommandExecutionError(msg) if not isinstance(nameservers, list): nameservers = nameservers.split(' ') if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '0') _save_config(interface, 'IP_Address', address) _save_config(interface, 'Subnet_Mask', netmask) _save_config(interface, 'Gateway', gateway) if nameservers: _save_config(interface, 'DNS_Address', nameservers[0]) if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: if interface in pyiface.getIfaces(): return _configure_static_interface(interface, **{'ip': address, 'dns': ','.join(nameservers) if nameservers else '', 'netmask': netmask, 'gateway': gateway}) raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('manual', variant_level=1) ipv4['Address'] = dbus.String('{0}'.format(address), variant_level=1) ipv4['Netmask'] = dbus.String('{0}'.format(netmask), variant_level=1) ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) if nameservers: service.set_property('Nameservers.Configuration', [dbus.String('{0}'.format(d)) for d in nameservers]) except Exception as exc: exc_msg = 'Couldn\'t set manual settings for 
service: {0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def get_interface(iface): ''' Returns details about given interface. CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0 ''' _interfaces = get_interfaces_details() for _interface in _interfaces['interfaces']: if _interface['connectionid'] == iface: return _dict_to_string(_interface) return None def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. code-block:: bash salt '*' ip.build_network_settings <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') changes = [] if 'networking' in settings: if settings['networking'] in _CONFIG_TRUE: __salt__['service.enable']('connman') else: __salt__['service.disable']('connman') if 'hostname' in settings: new_hostname = settings['hostname'].split('.', 1)[0] settings['hostname'] = new_hostname old_hostname = __salt__['network.get_hostname'] if new_hostname != old_hostname: __salt__['network.mod_hostname'](new_hostname) changes.append('hostname={0}'.format(new_hostname)) return changes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') settings = [] networking = 'no' if _get_state() == 'offline' else 'yes' settings.append('networking={0}'.format(networking)) hostname = __salt__['network.get_hostname'] settings.append('hostname={0}'.format(hostname)) return settings def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. 
code-block:: bash salt '*' ip.apply_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' ) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: stop = __salt__['service.stop']('connman') time.sleep(2) res = stop and __salt__['service.start']('connman') return hostname_res and res
saltstack/salt
salt/modules/nilrt_ip.py
build_network_settings
python
def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. code-block:: bash salt '*' ip.build_network_settings <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') changes = [] if 'networking' in settings: if settings['networking'] in _CONFIG_TRUE: __salt__['service.enable']('connman') else: __salt__['service.disable']('connman') if 'hostname' in settings: new_hostname = settings['hostname'].split('.', 1)[0] settings['hostname'] = new_hostname old_hostname = __salt__['network.get_hostname'] if new_hostname != old_hostname: __salt__['network.mod_hostname'](new_hostname) changes.append('hostname={0}'.format(new_hostname)) return changes
Build the global network script. CLI Example: .. code-block:: bash salt '*' ip.build_network_settings <settings>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nilrt_ip.py#L951-L978
null
# -*- coding: utf-8 -*- ''' The networking module for NI Linux Real-Time distro ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time import os import re # Import salt libs import salt.exceptions import salt.utils.files import salt.utils.validate.net # Import 3rd-party libs # pylint: disable=import-error,redefined-builtin,no-name-in-module from salt.ext.six.moves import map, range, configparser from salt.ext import six # pylint: enable=import-error,redefined-builtin,no-name-in-module try: import pyconnman except ImportError: pyconnman = None try: import dbus except ImportError: dbus = None try: import pyiface from pyiface.ifreqioctls import IFF_LOOPBACK, IFF_RUNNING except ImportError: pyiface = None try: from requests.structures import CaseInsensitiveDict except ImportError: CaseInsensitiveDict = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'ip' SERVICE_PATH = '/net/connman/service/' INTERFACES_CONFIG = '/var/lib/connman/interfaces.config' NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg' INI_FILE = '/etc/natinst/share/ni-rt.ini' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] NIRTCFG_ETHERCAT = 'EtherCAT' def _assume_condition(condition, err): ''' Raise an exception if the condition is false ''' if not condition: raise RuntimeError(err) def __virtual__(): ''' Confine this module to NI Linux Real-Time based distros ''' try: msg = 'The nilrt_ip module could not be loaded: unsupported OS family' _assume_condition(__grains__['os_family'] == 'NILinuxRT', msg) _assume_condition(CaseInsensitiveDict, 'The python package request is not installed') _assume_condition(pyiface, 'The python pyiface package is not installed') if __grains__['lsb_distrib_id'] != 'nilrt': _assume_condition(pyconnman, 'The python package pyconnman is not installed') _assume_condition(dbus, 'The python DBus package is not installed') _assume_condition(_get_state() != 'offline', 
'Connman is not running') except RuntimeError as exc: return False, str(exc) return __virtualname__ def _get_state(): ''' Returns the state of connman ''' try: return pyconnman.ConnManager().get_property('State') except KeyError: return 'offline' except dbus.DBusException as exc: raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc)) def _get_technologies(): ''' Returns the technologies of connman ''' tech = '' technologies = pyconnman.ConnManager().get_technologies() for path, params in technologies: tech += '{0}\n\tName = {1}\n\tType = {2}\n\tPowered = {3}\n\tConnected = {4}\n'.format( path, params['Name'], params['Type'], params['Powered'] == 1, params['Connected'] == 1) return tech def _get_services(): ''' Returns a list with all connman services ''' serv = [] services = pyconnman.ConnManager().get_services() for path, _ in services: serv.append(six.text_type(path[len(SERVICE_PATH):])) return serv def _connected(service): ''' Verify if a connman service is connected ''' state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State') return state == 'online' or state == 'ready' def _space_delimited_list(value): ''' validate that a value contains one or more space-delimited values ''' if isinstance(value, six.string_types): items = value.split(' ') valid = items and all(items) else: valid = hasattr(value, '__iter__') and (value != []) if valid: return True, 'space-delimited string' return False, '{0} is not a valid list.\n'.format(value) def _validate_ipv4(value): ''' validate ipv4 values ''' if len(value) == 3: if not salt.utils.validate.net.ipv4_addr(value[0].strip()): return False, 'Invalid ip address: {0} for ipv4 option'.format(value[0]) if not salt.utils.validate.net.netmask(value[1].strip()): return False, 'Invalid netmask: {0} for ipv4 option'.format(value[1]) if not salt.utils.validate.net.ipv4_addr(value[2].strip()): return False, 'Invalid gateway: {0} for ipv4 option'.format(value[2]) else: return 
False, 'Invalid value: {0} for ipv4 option'.format(value) return True, '' def _interface_to_service(iface): ''' returns the coresponding service to given interface if exists, otherwise return None ''' for _service in _get_services(): service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service)) if service_info.get_property('Ethernet')['Interface'] == iface: return _service return None def _get_service_info(service): ''' return details about given connman service ''' service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) data = { 'label': service, 'wireless': service_info.get_property('Type') == 'wifi', 'connectionid': six.text_type(service_info.get_property('Ethernet')['Interface']), 'hwaddr': six.text_type(service_info.get_property('Ethernet')['Address']) } state = service_info.get_property('State') if state == 'ready' or state == 'online': data['up'] = True data['ipv4'] = { 'gateway': '0.0.0.0' } ipv4 = 'IPv4' if service_info.get_property('IPv4')['Method'] == 'manual': ipv4 += '.Configuration' ipv4_info = service_info.get_property(ipv4) for info in ['Method', 'Address', 'Netmask', 'Gateway']: value = ipv4_info.get(info) if value is None: log.warning('Unable to get IPv4 %s for service %s\n', info, service) continue if info == 'Method': info = 'requestmode' if value == 'dhcp': value = 'dhcp_linklocal' elif value in ('manual', 'fixed'): value = 'static' data['ipv4'][info.lower()] = six.text_type(value) ipv6_info = service_info.get_property('IPv6') for info in ['Address', 'Prefix', 'Gateway']: value = ipv6_info.get(info) if value is None: log.warning('Unable to get IPv6 %s for service %s\n', info, service) continue if 'ipv6' not in data: data['ipv6'] = {} data['ipv6'][info.lower()] = [six.text_type(value)] nameservers = [] for nameserver_prop in service_info.get_property('Nameservers'): nameservers.append(six.text_type(nameserver_prop)) data['ipv4']['dns'] = nameservers else: data['up'] = False data['ipv4'] = { 'requestmode': 
'disabled' } data['ipv4']['supportedrequestmodes'] = [ 'static', 'dhcp_linklocal', 'disabled' ] return data def _get_dns_info(): ''' return dns list ''' dns_list = [] try: with salt.utils.files.fopen('/etc/resolv.conf', 'r+') as dns_info: lines = dns_info.readlines() for line in lines: if 'nameserver' in line: dns = line.split()[1].strip() if dns not in dns_list: dns_list.append(dns) except IOError: log.warning('Could not get domain\n') return dns_list def _remove_quotes(value): ''' Remove leading and trailing double quotes if they exist. ''' # nirtcfg writes values with quotes if len(value) > 1 and value[0] == value[-1] == '\"': value = value[1:-1] return value def _load_config(section, options, default_value='', filename=INI_FILE): ''' Get values for some options and a given section from a config file. :param section: Section Name :param options: List of options :param default_value: Default value if an option doesn't have a value. Default is empty string. :param filename: config file. Default is INI_FILE. 
:return: ''' results = {} if not options: return results with salt.utils.files.fopen(filename, 'r') as config_file: config_parser = configparser.RawConfigParser(dict_type=CaseInsensitiveDict) config_parser.readfp(config_file) for option in options: results[option] = _remove_quotes(config_parser.get(section, option)) \ if config_parser.has_option(section, option) else default_value return results def _get_request_mode_info(interface): ''' return requestmode for given interface ''' settings = _load_config(interface, ['linklocalenabled', 'dhcpenabled'], -1) link_local_enabled = int(settings['linklocalenabled']) dhcp_enabled = int(settings['dhcpenabled']) if dhcp_enabled == 1: return 'dhcp_linklocal' if link_local_enabled == 1 else 'dhcp_only' else: if link_local_enabled == 1: return 'linklocal_only' if link_local_enabled == 0: return 'static' # some versions of nirtcfg don't set the dhcpenabled/linklocalenabled variables # when selecting "DHCP or Link Local" from MAX, so return it by default to avoid # having the requestmode "None" because none of the conditions above matched. return 'dhcp_linklocal' def _get_adapter_mode_info(interface): ''' return adaptermode for given interface ''' mode = _load_config(interface, ['mode'])['mode'].lower() return mode if mode in ['disabled', 'ethercat'] else 'tcpip' def _get_possible_adapter_modes(interface, blacklist): ''' Return possible adapter modes for a given interface using a blacklist. 
:param interface: interface name :param blacklist: given blacklist :return: list of possible adapter modes ''' adapter_modes = [] protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower() sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface)) with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file: uevent_lines = uevent_file.readlines() uevent_devtype = "" for line in uevent_lines: if line.startswith("DEVTYPE="): uevent_devtype = line.split('=')[1].strip() break for adapter_mode in blacklist: if adapter_mode == '_': continue value = blacklist.get(adapter_mode, {}) if value.get('additional_protocol') and adapter_mode not in protocols: continue if interface not in value['name'] \ and not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path) or (blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype) for iface_type in value['type']): adapter_modes += [adapter_mode] return adapter_modes def _get_static_info(interface): ''' Return information about an interface from config file. 
:param interface: interface label ''' data = { 'connectionid': interface.name, 'label': interface.name, 'hwaddr': interface.hwaddr[:-1], 'up': False, 'ipv4': { 'supportedrequestmodes': ['static', 'dhcp_linklocal', 'disabled'], 'requestmode': 'static' }, 'wireless': False } hwaddr_section_number = ''.join(data['hwaddr'].split(':')) if os.path.exists(INTERFACES_CONFIG): information = _load_config(hwaddr_section_number, ['IPv4', 'Nameservers'], filename=INTERFACES_CONFIG) if information['IPv4'] != '': ipv4_information = information['IPv4'].split('/') data['ipv4']['address'] = ipv4_information[0] data['ipv4']['dns'] = information['Nameservers'].split(',') data['ipv4']['netmask'] = ipv4_information[1] data['ipv4']['gateway'] = ipv4_information[2] return data def _get_base_interface_info(interface): ''' return base details about given interface ''' blacklist = { 'tcpip': { 'name': [], 'type': [], 'additional_protocol': False }, 'disabled': { 'name': ['eth0'], 'type': ['gadget'], 'additional_protocol': False }, 'ethercat': { 'name': ['eth0'], 'type': ['gadget', 'usb', 'wlan'], 'additional_protocol': True }, '_': { 'usb': 'sys', 'gadget': 'uevent', 'wlan': 'uevent' } } return { 'label': interface.name, 'connectionid': interface.name, 'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist), 'adapter_mode': _get_adapter_mode_info(interface.name), 'up': interface.flags & IFF_RUNNING != 0, 'ipv4': { 'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'], 'requestmode': _get_request_mode_info(interface.name) }, 'hwaddr': interface.hwaddr[:-1] } def _get_ethercat_interface_info(interface): ''' return details about given ethercat interface ''' base_information = _get_base_interface_info(interface) base_information['ethercat'] = { 'masterid': _load_config(interface.name, ['MasterID'])['MasterID'] } return base_information def _get_tcpip_interface_info(interface): ''' return details about given tcpip interface ''' 
base_information = _get_base_interface_info(interface) if base_information['ipv4']['requestmode'] == 'static': settings = _load_config(interface.name, ['IP_Address', 'Subnet_Mask', 'Gateway', 'DNS_Address']) base_information['ipv4']['address'] = settings['IP_Address'] base_information['ipv4']['netmask'] = settings['Subnet_Mask'] base_information['ipv4']['gateway'] = settings['Gateway'] base_information['ipv4']['dns'] = [settings['DNS_Address']] elif base_information['up']: base_information['ipv4']['address'] = interface.sockaddrToStr(interface.addr) base_information['ipv4']['netmask'] = interface.sockaddrToStr(interface.netmask) base_information['ipv4']['gateway'] = '0.0.0.0' base_information['ipv4']['dns'] = _get_dns_info() with salt.utils.files.fopen('/proc/net/route', 'r') as route_file: pattern = re.compile(r'^{interface}\t[0]{{8}}\t([0-9A-Z]{{8}})'.format(interface=interface.name), re.MULTILINE) match = pattern.search(route_file.read()) iface_gateway_hex = None if not match else match.group(1) if iface_gateway_hex is not None and len(iface_gateway_hex) == 8: base_information['ipv4']['gateway'] = '.'.join([str(int(iface_gateway_hex[i:i + 2], 16)) for i in range(6, -1, -2)]) return base_information def _get_interface_info(interface): ''' return details about given interface ''' adapter_mode = _get_adapter_mode_info(interface.name) if adapter_mode == 'disabled': return _get_base_interface_info(interface) elif adapter_mode == 'ethercat': return _get_ethercat_interface_info(interface) return _get_tcpip_interface_info(interface) def _dict_to_string(dictionary): ''' converts a dictionary object into a list of strings ''' ret = '' for key, val in sorted(dictionary.items()): if isinstance(val, dict): for line in _dict_to_string(val): ret += six.text_type(key) + '-' + line + '\n' elif isinstance(val, list): text = ' '.join([six.text_type(item) for item in val]) ret += six.text_type(key) + ': ' + text + '\n' else: ret += six.text_type(key) + ': ' + six.text_type(val) + 
'\n' return ret.splitlines() def _get_info(interface): ''' Return information about an interface if it's associated with a service. :param interface: interface label ''' service = _interface_to_service(interface.name) return _get_service_info(service) def get_interfaces_details(): ''' Get details about all the interfaces on the minion :return: information about all interfaces omitting loopback :rtype: dictionary CLI Example: .. code-block:: bash salt '*' ip.get_interfaces_details ''' _interfaces = [interface for interface in pyiface.getIfaces() if interface.flags & IFF_LOOPBACK == 0] if __grains__['lsb_distrib_id'] == 'nilrt': return {'interfaces': list(map(_get_interface_info, _interfaces))} # filter just the services _interfaces = [interface for interface in _interfaces if _interface_to_service(interface.name) is not None] return {'interfaces': list(map(_get_info, _interfaces))} def _change_state_legacy(interface, new_state): ''' Enable or disable an interface on a legacy distro Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP' if new_state == 'up' else 'Disabled') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: out = __salt__['cmd.run_all']('ip link set {0} {1}'.format(interface, new_state)) if out['retcode'] != 0: msg = 'Couldn\'t {0} interface {1}. Error: {2}'.format('enable' if new_state == 'up' else 'disable', interface, out['stderr']) raise salt.exceptions.CommandExecutionError(msg) return True def _change_state(interface, new_state): ''' Enable or disable an interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. 
:param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' if __grains__['lsb_distrib_id'] == 'nilrt': return _change_state_legacy(interface, new_state) service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) connected = _connected(service) if (not connected and new_state == 'up') or (connected and new_state == 'down'): service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) try: state = service.connect() if new_state == 'up' else service.disconnect() return state is None except Exception: raise salt.exceptions.CommandExecutionError('Couldn\'t {0} service: {1}\n' .format('enable' if new_state == 'up' else 'disable', service)) return True def up(interface, iface_type=None): # pylint: disable=invalid-name,unused-argument ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.up interface-label ''' return _change_state(interface, 'up') def enable(interface): ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.enable interface-label ''' return up(interface) def down(interface, iface_type=None): # pylint: disable=unused-argument ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. 
:param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.down interface-label ''' return _change_state(interface, 'down') def disable(interface): ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.disable interface-label ''' return down(interface) def _save_config(section, token, value): ''' Helper function to persist a configuration in the ini file ''' cmd = NIRTCFG_PATH cmd += ' --set section={0},token=\'{1}\',value=\'{2}\''.format(section, token, value) if __salt__['cmd.run_all'](cmd)['retcode'] != 0: exc_msg = 'Error: could not set {} to {} for {}\n'.format(token, value, section) raise salt.exceptions.CommandExecutionError(exc_msg) def set_ethercat(interface, master_id): ''' Configure specified adapter to use EtherCAT adapter mode. If successful, the target will need reboot if it doesn't already use EtherCAT adapter mode, otherwise will return true. :param interface: interface label :param master_id: EtherCAT Master ID :return: True if the settings were applied, otherwise an exception will be thrown. CLI Example: .. 
code-block:: bash salt '*' ip.set_ethercat interface-label master-id ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', NIRTCFG_ETHERCAT) _save_config(interface, 'MasterID', master_id) if initial_mode != 'ethercat': __salt__['system.set_reboot_required_witnessed']() return True raise salt.exceptions.CommandExecutionError('EtherCAT is not supported') def _restart(interface): ''' Disable and enable an interface ''' disable(interface) enable(interface) def set_dhcp_linklocal_all(interface): ''' Configure specified adapter to use DHCP with linklocal fallback Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_dhcp_linklocal_all interface-label ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('dhcp', variant_level=1) ipv4['Address'] = dbus.String('', variant_level=1) ipv4['Netmask'] = dbus.String('', variant_level=1) ipv4['Gateway'] = dbus.String('', variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) service.set_property('Nameservers.Configuration', ['']) # reset nameservers list except Exception as exc: exc_msg = 'Couldn\'t set dhcp linklocal for service: 
{0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def set_dhcp_only_all(interface): ''' Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.dhcp_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '0') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def set_linklocal_only_all(interface): ''' Configure specified adapter to use linklocal only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.linklocal_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def _configure_static_interface(interface, **settings): ''' Configure an interface that is not detected as a service by Connman (i.e. 
link is down) :param interface: interface label :param settings: - ip - netmask - gateway - dns - name :return: True if settings were applied successfully. :rtype: bool ''' interface = pyiface.Interface(name=interface) parser = configparser.ConfigParser() if os.path.exists(INTERFACES_CONFIG): try: with salt.utils.files.fopen(INTERFACES_CONFIG, 'r') as config_file: parser.readfp(config_file) except configparser.MissingSectionHeaderError: pass hwaddr = interface.hwaddr[:-1] hwaddr_section_number = ''.join(hwaddr.split(':')) if not parser.has_section('interface_{0}'.format(hwaddr_section_number)): parser.add_section('interface_{0}'.format(hwaddr_section_number)) ip_address = settings.get('ip', '0.0.0.0') netmask = settings.get('netmask', '0.0.0.0') gateway = settings.get('gateway', '0.0.0.0') dns_servers = settings.get('dns', '') name = settings.get('name', 'ethernet_cable_{0}'.format(hwaddr_section_number)) parser.set('interface_{0}'.format(hwaddr_section_number), 'IPv4', '{0}/{1}/{2}'. format(ip_address, netmask, gateway)) parser.set('interface_{0}'.format(hwaddr_section_number), 'Nameservers', dns_servers) parser.set('interface_{0}'.format(hwaddr_section_number), 'Name', name) parser.set('interface_{0}'.format(hwaddr_section_number), 'MAC', hwaddr) parser.set('interface_{0}'.format(hwaddr_section_number), 'Type', 'ethernet') with salt.utils.files.fopen(INTERFACES_CONFIG, 'w') as config_file: parser.write(config_file) return True def set_static_all(interface, address, netmask, gateway, nameservers=None): ''' Configure specified adapter to use ipv4 manual settings Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :param str address: ipv4 address :param str netmask: ipv4 netmask :param str gateway: ipv4 gateway :param str nameservers: list of nameservers servers separated by spaces (Optional) :return: True if the settings were applied, otherwise an exception will be thrown. 
:rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_static_all interface-label address netmask gateway nameservers ''' validate, msg = _validate_ipv4([address, netmask, gateway]) if not validate: raise salt.exceptions.CommandExecutionError(msg) if nameservers: validate, msg = _space_delimited_list(nameservers) if not validate: raise salt.exceptions.CommandExecutionError(msg) if not isinstance(nameservers, list): nameservers = nameservers.split(' ') if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '0') _save_config(interface, 'IP_Address', address) _save_config(interface, 'Subnet_Mask', netmask) _save_config(interface, 'Gateway', gateway) if nameservers: _save_config(interface, 'DNS_Address', nameservers[0]) if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: if interface in pyiface.getIfaces(): return _configure_static_interface(interface, **{'ip': address, 'dns': ','.join(nameservers) if nameservers else '', 'netmask': netmask, 'gateway': gateway}) raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('manual', variant_level=1) ipv4['Address'] = dbus.String('{0}'.format(address), variant_level=1) ipv4['Netmask'] = dbus.String('{0}'.format(netmask), variant_level=1) ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) if nameservers: service.set_property('Nameservers.Configuration', [dbus.String('{0}'.format(d)) for d in nameservers]) except Exception as exc: exc_msg = 'Couldn\'t set manual settings for 
service: {0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def get_interface(iface): ''' Returns details about given interface. CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0 ''' _interfaces = get_interfaces_details() for _interface in _interfaces['interfaces']: if _interface['connectionid'] == iface: return _dict_to_string(_interface) return None def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if iface_type != 'eth': raise salt.exceptions.CommandExecutionError('Interface type not supported: {0}:'.format(iface_type)) if 'proto' not in settings or settings['proto'] == 'dhcp': # default protocol type used is dhcp set_dhcp_linklocal_all(iface) elif settings['proto'] != 'static': exc_msg = 'Protocol type: {0} is not supported'.format(settings['proto']) raise salt.exceptions.CommandExecutionError(exc_msg) else: address = settings['ipaddr'] netmask = settings['netmask'] gateway = settings['gateway'] dns = [] for key, val in six.iteritems(settings): if 'dns' in key or 'domain' in key: dns += val set_static_all(iface, address, netmask, gateway, dns) if enabled: up(iface) return get_interface(iface) def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.get_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') settings = [] networking = 'no' if _get_state() == 'offline' else 'yes' settings.append('networking={0}'.format(networking)) hostname = __salt__['network.get_hostname'] settings.append('hostname={0}'.format(hostname)) return settings def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' ) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: stop = __salt__['service.stop']('connman') time.sleep(2) res = stop and __salt__['service.start']('connman') return hostname_res and res
saltstack/salt
salt/modules/nilrt_ip.py
get_network_settings
python
def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') settings = [] networking = 'no' if _get_state() == 'offline' else 'yes' settings.append('networking={0}'.format(networking)) hostname = __salt__['network.get_hostname'] settings.append('hostname={0}'.format(hostname)) return settings
Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nilrt_ip.py#L981-L998
[ "def _get_state():\n '''\n Returns the state of connman\n '''\n try:\n return pyconnman.ConnManager().get_property('State')\n except KeyError:\n return 'offline'\n except dbus.DBusException as exc:\n raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc))\n" ]
# -*- coding: utf-8 -*- ''' The networking module for NI Linux Real-Time distro ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time import os import re # Import salt libs import salt.exceptions import salt.utils.files import salt.utils.validate.net # Import 3rd-party libs # pylint: disable=import-error,redefined-builtin,no-name-in-module from salt.ext.six.moves import map, range, configparser from salt.ext import six # pylint: enable=import-error,redefined-builtin,no-name-in-module try: import pyconnman except ImportError: pyconnman = None try: import dbus except ImportError: dbus = None try: import pyiface from pyiface.ifreqioctls import IFF_LOOPBACK, IFF_RUNNING except ImportError: pyiface = None try: from requests.structures import CaseInsensitiveDict except ImportError: CaseInsensitiveDict = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'ip' SERVICE_PATH = '/net/connman/service/' INTERFACES_CONFIG = '/var/lib/connman/interfaces.config' NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg' INI_FILE = '/etc/natinst/share/ni-rt.ini' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] NIRTCFG_ETHERCAT = 'EtherCAT' def _assume_condition(condition, err): ''' Raise an exception if the condition is false ''' if not condition: raise RuntimeError(err) def __virtual__(): ''' Confine this module to NI Linux Real-Time based distros ''' try: msg = 'The nilrt_ip module could not be loaded: unsupported OS family' _assume_condition(__grains__['os_family'] == 'NILinuxRT', msg) _assume_condition(CaseInsensitiveDict, 'The python package request is not installed') _assume_condition(pyiface, 'The python pyiface package is not installed') if __grains__['lsb_distrib_id'] != 'nilrt': _assume_condition(pyconnman, 'The python package pyconnman is not installed') _assume_condition(dbus, 'The python DBus package is not installed') _assume_condition(_get_state() != 'offline', 
'Connman is not running') except RuntimeError as exc: return False, str(exc) return __virtualname__ def _get_state(): ''' Returns the state of connman ''' try: return pyconnman.ConnManager().get_property('State') except KeyError: return 'offline' except dbus.DBusException as exc: raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc)) def _get_technologies(): ''' Returns the technologies of connman ''' tech = '' technologies = pyconnman.ConnManager().get_technologies() for path, params in technologies: tech += '{0}\n\tName = {1}\n\tType = {2}\n\tPowered = {3}\n\tConnected = {4}\n'.format( path, params['Name'], params['Type'], params['Powered'] == 1, params['Connected'] == 1) return tech def _get_services(): ''' Returns a list with all connman services ''' serv = [] services = pyconnman.ConnManager().get_services() for path, _ in services: serv.append(six.text_type(path[len(SERVICE_PATH):])) return serv def _connected(service): ''' Verify if a connman service is connected ''' state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State') return state == 'online' or state == 'ready' def _space_delimited_list(value): ''' validate that a value contains one or more space-delimited values ''' if isinstance(value, six.string_types): items = value.split(' ') valid = items and all(items) else: valid = hasattr(value, '__iter__') and (value != []) if valid: return True, 'space-delimited string' return False, '{0} is not a valid list.\n'.format(value) def _validate_ipv4(value): ''' validate ipv4 values ''' if len(value) == 3: if not salt.utils.validate.net.ipv4_addr(value[0].strip()): return False, 'Invalid ip address: {0} for ipv4 option'.format(value[0]) if not salt.utils.validate.net.netmask(value[1].strip()): return False, 'Invalid netmask: {0} for ipv4 option'.format(value[1]) if not salt.utils.validate.net.ipv4_addr(value[2].strip()): return False, 'Invalid gateway: {0} for ipv4 option'.format(value[2]) else: return 
False, 'Invalid value: {0} for ipv4 option'.format(value) return True, '' def _interface_to_service(iface): ''' returns the coresponding service to given interface if exists, otherwise return None ''' for _service in _get_services(): service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service)) if service_info.get_property('Ethernet')['Interface'] == iface: return _service return None def _get_service_info(service): ''' return details about given connman service ''' service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) data = { 'label': service, 'wireless': service_info.get_property('Type') == 'wifi', 'connectionid': six.text_type(service_info.get_property('Ethernet')['Interface']), 'hwaddr': six.text_type(service_info.get_property('Ethernet')['Address']) } state = service_info.get_property('State') if state == 'ready' or state == 'online': data['up'] = True data['ipv4'] = { 'gateway': '0.0.0.0' } ipv4 = 'IPv4' if service_info.get_property('IPv4')['Method'] == 'manual': ipv4 += '.Configuration' ipv4_info = service_info.get_property(ipv4) for info in ['Method', 'Address', 'Netmask', 'Gateway']: value = ipv4_info.get(info) if value is None: log.warning('Unable to get IPv4 %s for service %s\n', info, service) continue if info == 'Method': info = 'requestmode' if value == 'dhcp': value = 'dhcp_linklocal' elif value in ('manual', 'fixed'): value = 'static' data['ipv4'][info.lower()] = six.text_type(value) ipv6_info = service_info.get_property('IPv6') for info in ['Address', 'Prefix', 'Gateway']: value = ipv6_info.get(info) if value is None: log.warning('Unable to get IPv6 %s for service %s\n', info, service) continue if 'ipv6' not in data: data['ipv6'] = {} data['ipv6'][info.lower()] = [six.text_type(value)] nameservers = [] for nameserver_prop in service_info.get_property('Nameservers'): nameservers.append(six.text_type(nameserver_prop)) data['ipv4']['dns'] = nameservers else: data['up'] = False data['ipv4'] = { 'requestmode': 
'disabled' } data['ipv4']['supportedrequestmodes'] = [ 'static', 'dhcp_linklocal', 'disabled' ] return data def _get_dns_info(): ''' return dns list ''' dns_list = [] try: with salt.utils.files.fopen('/etc/resolv.conf', 'r+') as dns_info: lines = dns_info.readlines() for line in lines: if 'nameserver' in line: dns = line.split()[1].strip() if dns not in dns_list: dns_list.append(dns) except IOError: log.warning('Could not get domain\n') return dns_list def _remove_quotes(value): ''' Remove leading and trailing double quotes if they exist. ''' # nirtcfg writes values with quotes if len(value) > 1 and value[0] == value[-1] == '\"': value = value[1:-1] return value def _load_config(section, options, default_value='', filename=INI_FILE): ''' Get values for some options and a given section from a config file. :param section: Section Name :param options: List of options :param default_value: Default value if an option doesn't have a value. Default is empty string. :param filename: config file. Default is INI_FILE. 
:return: ''' results = {} if not options: return results with salt.utils.files.fopen(filename, 'r') as config_file: config_parser = configparser.RawConfigParser(dict_type=CaseInsensitiveDict) config_parser.readfp(config_file) for option in options: results[option] = _remove_quotes(config_parser.get(section, option)) \ if config_parser.has_option(section, option) else default_value return results def _get_request_mode_info(interface): ''' return requestmode for given interface ''' settings = _load_config(interface, ['linklocalenabled', 'dhcpenabled'], -1) link_local_enabled = int(settings['linklocalenabled']) dhcp_enabled = int(settings['dhcpenabled']) if dhcp_enabled == 1: return 'dhcp_linklocal' if link_local_enabled == 1 else 'dhcp_only' else: if link_local_enabled == 1: return 'linklocal_only' if link_local_enabled == 0: return 'static' # some versions of nirtcfg don't set the dhcpenabled/linklocalenabled variables # when selecting "DHCP or Link Local" from MAX, so return it by default to avoid # having the requestmode "None" because none of the conditions above matched. return 'dhcp_linklocal' def _get_adapter_mode_info(interface): ''' return adaptermode for given interface ''' mode = _load_config(interface, ['mode'])['mode'].lower() return mode if mode in ['disabled', 'ethercat'] else 'tcpip' def _get_possible_adapter_modes(interface, blacklist): ''' Return possible adapter modes for a given interface using a blacklist. 
:param interface: interface name :param blacklist: given blacklist :return: list of possible adapter modes ''' adapter_modes = [] protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower() sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface)) with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file: uevent_lines = uevent_file.readlines() uevent_devtype = "" for line in uevent_lines: if line.startswith("DEVTYPE="): uevent_devtype = line.split('=')[1].strip() break for adapter_mode in blacklist: if adapter_mode == '_': continue value = blacklist.get(adapter_mode, {}) if value.get('additional_protocol') and adapter_mode not in protocols: continue if interface not in value['name'] \ and not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path) or (blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype) for iface_type in value['type']): adapter_modes += [adapter_mode] return adapter_modes def _get_static_info(interface): ''' Return information about an interface from config file. 
:param interface: interface label ''' data = { 'connectionid': interface.name, 'label': interface.name, 'hwaddr': interface.hwaddr[:-1], 'up': False, 'ipv4': { 'supportedrequestmodes': ['static', 'dhcp_linklocal', 'disabled'], 'requestmode': 'static' }, 'wireless': False } hwaddr_section_number = ''.join(data['hwaddr'].split(':')) if os.path.exists(INTERFACES_CONFIG): information = _load_config(hwaddr_section_number, ['IPv4', 'Nameservers'], filename=INTERFACES_CONFIG) if information['IPv4'] != '': ipv4_information = information['IPv4'].split('/') data['ipv4']['address'] = ipv4_information[0] data['ipv4']['dns'] = information['Nameservers'].split(',') data['ipv4']['netmask'] = ipv4_information[1] data['ipv4']['gateway'] = ipv4_information[2] return data def _get_base_interface_info(interface): ''' return base details about given interface ''' blacklist = { 'tcpip': { 'name': [], 'type': [], 'additional_protocol': False }, 'disabled': { 'name': ['eth0'], 'type': ['gadget'], 'additional_protocol': False }, 'ethercat': { 'name': ['eth0'], 'type': ['gadget', 'usb', 'wlan'], 'additional_protocol': True }, '_': { 'usb': 'sys', 'gadget': 'uevent', 'wlan': 'uevent' } } return { 'label': interface.name, 'connectionid': interface.name, 'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist), 'adapter_mode': _get_adapter_mode_info(interface.name), 'up': interface.flags & IFF_RUNNING != 0, 'ipv4': { 'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'], 'requestmode': _get_request_mode_info(interface.name) }, 'hwaddr': interface.hwaddr[:-1] } def _get_ethercat_interface_info(interface): ''' return details about given ethercat interface ''' base_information = _get_base_interface_info(interface) base_information['ethercat'] = { 'masterid': _load_config(interface.name, ['MasterID'])['MasterID'] } return base_information def _get_tcpip_interface_info(interface): ''' return details about given tcpip interface ''' 
base_information = _get_base_interface_info(interface) if base_information['ipv4']['requestmode'] == 'static': settings = _load_config(interface.name, ['IP_Address', 'Subnet_Mask', 'Gateway', 'DNS_Address']) base_information['ipv4']['address'] = settings['IP_Address'] base_information['ipv4']['netmask'] = settings['Subnet_Mask'] base_information['ipv4']['gateway'] = settings['Gateway'] base_information['ipv4']['dns'] = [settings['DNS_Address']] elif base_information['up']: base_information['ipv4']['address'] = interface.sockaddrToStr(interface.addr) base_information['ipv4']['netmask'] = interface.sockaddrToStr(interface.netmask) base_information['ipv4']['gateway'] = '0.0.0.0' base_information['ipv4']['dns'] = _get_dns_info() with salt.utils.files.fopen('/proc/net/route', 'r') as route_file: pattern = re.compile(r'^{interface}\t[0]{{8}}\t([0-9A-Z]{{8}})'.format(interface=interface.name), re.MULTILINE) match = pattern.search(route_file.read()) iface_gateway_hex = None if not match else match.group(1) if iface_gateway_hex is not None and len(iface_gateway_hex) == 8: base_information['ipv4']['gateway'] = '.'.join([str(int(iface_gateway_hex[i:i + 2], 16)) for i in range(6, -1, -2)]) return base_information def _get_interface_info(interface): ''' return details about given interface ''' adapter_mode = _get_adapter_mode_info(interface.name) if adapter_mode == 'disabled': return _get_base_interface_info(interface) elif adapter_mode == 'ethercat': return _get_ethercat_interface_info(interface) return _get_tcpip_interface_info(interface) def _dict_to_string(dictionary): ''' converts a dictionary object into a list of strings ''' ret = '' for key, val in sorted(dictionary.items()): if isinstance(val, dict): for line in _dict_to_string(val): ret += six.text_type(key) + '-' + line + '\n' elif isinstance(val, list): text = ' '.join([six.text_type(item) for item in val]) ret += six.text_type(key) + ': ' + text + '\n' else: ret += six.text_type(key) + ': ' + six.text_type(val) + 
'\n' return ret.splitlines() def _get_info(interface): ''' Return information about an interface if it's associated with a service. :param interface: interface label ''' service = _interface_to_service(interface.name) return _get_service_info(service) def get_interfaces_details(): ''' Get details about all the interfaces on the minion :return: information about all interfaces omitting loopback :rtype: dictionary CLI Example: .. code-block:: bash salt '*' ip.get_interfaces_details ''' _interfaces = [interface for interface in pyiface.getIfaces() if interface.flags & IFF_LOOPBACK == 0] if __grains__['lsb_distrib_id'] == 'nilrt': return {'interfaces': list(map(_get_interface_info, _interfaces))} # filter just the services _interfaces = [interface for interface in _interfaces if _interface_to_service(interface.name) is not None] return {'interfaces': list(map(_get_info, _interfaces))} def _change_state_legacy(interface, new_state): ''' Enable or disable an interface on a legacy distro Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP' if new_state == 'up' else 'Disabled') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: out = __salt__['cmd.run_all']('ip link set {0} {1}'.format(interface, new_state)) if out['retcode'] != 0: msg = 'Couldn\'t {0} interface {1}. Error: {2}'.format('enable' if new_state == 'up' else 'disable', interface, out['stderr']) raise salt.exceptions.CommandExecutionError(msg) return True def _change_state(interface, new_state): ''' Enable or disable an interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. 
:param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' if __grains__['lsb_distrib_id'] == 'nilrt': return _change_state_legacy(interface, new_state) service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) connected = _connected(service) if (not connected and new_state == 'up') or (connected and new_state == 'down'): service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) try: state = service.connect() if new_state == 'up' else service.disconnect() return state is None except Exception: raise salt.exceptions.CommandExecutionError('Couldn\'t {0} service: {1}\n' .format('enable' if new_state == 'up' else 'disable', service)) return True def up(interface, iface_type=None): # pylint: disable=invalid-name,unused-argument ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.up interface-label ''' return _change_state(interface, 'up') def enable(interface): ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.enable interface-label ''' return up(interface) def down(interface, iface_type=None): # pylint: disable=unused-argument ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. 
:param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.down interface-label ''' return _change_state(interface, 'down') def disable(interface): ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.disable interface-label ''' return down(interface) def _save_config(section, token, value): ''' Helper function to persist a configuration in the ini file ''' cmd = NIRTCFG_PATH cmd += ' --set section={0},token=\'{1}\',value=\'{2}\''.format(section, token, value) if __salt__['cmd.run_all'](cmd)['retcode'] != 0: exc_msg = 'Error: could not set {} to {} for {}\n'.format(token, value, section) raise salt.exceptions.CommandExecutionError(exc_msg) def set_ethercat(interface, master_id): ''' Configure specified adapter to use EtherCAT adapter mode. If successful, the target will need reboot if it doesn't already use EtherCAT adapter mode, otherwise will return true. :param interface: interface label :param master_id: EtherCAT Master ID :return: True if the settings were applied, otherwise an exception will be thrown. CLI Example: .. 
code-block:: bash salt '*' ip.set_ethercat interface-label master-id ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', NIRTCFG_ETHERCAT) _save_config(interface, 'MasterID', master_id) if initial_mode != 'ethercat': __salt__['system.set_reboot_required_witnessed']() return True raise salt.exceptions.CommandExecutionError('EtherCAT is not supported') def _restart(interface): ''' Disable and enable an interface ''' disable(interface) enable(interface) def set_dhcp_linklocal_all(interface): ''' Configure specified adapter to use DHCP with linklocal fallback Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_dhcp_linklocal_all interface-label ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('dhcp', variant_level=1) ipv4['Address'] = dbus.String('', variant_level=1) ipv4['Netmask'] = dbus.String('', variant_level=1) ipv4['Gateway'] = dbus.String('', variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) service.set_property('Nameservers.Configuration', ['']) # reset nameservers list except Exception as exc: exc_msg = 'Couldn\'t set dhcp linklocal for service: 
{0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def set_dhcp_only_all(interface): ''' Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.dhcp_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '0') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def set_linklocal_only_all(interface): ''' Configure specified adapter to use linklocal only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.linklocal_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def _configure_static_interface(interface, **settings): ''' Configure an interface that is not detected as a service by Connman (i.e. 
link is down) :param interface: interface label :param settings: - ip - netmask - gateway - dns - name :return: True if settings were applied successfully. :rtype: bool ''' interface = pyiface.Interface(name=interface) parser = configparser.ConfigParser() if os.path.exists(INTERFACES_CONFIG): try: with salt.utils.files.fopen(INTERFACES_CONFIG, 'r') as config_file: parser.readfp(config_file) except configparser.MissingSectionHeaderError: pass hwaddr = interface.hwaddr[:-1] hwaddr_section_number = ''.join(hwaddr.split(':')) if not parser.has_section('interface_{0}'.format(hwaddr_section_number)): parser.add_section('interface_{0}'.format(hwaddr_section_number)) ip_address = settings.get('ip', '0.0.0.0') netmask = settings.get('netmask', '0.0.0.0') gateway = settings.get('gateway', '0.0.0.0') dns_servers = settings.get('dns', '') name = settings.get('name', 'ethernet_cable_{0}'.format(hwaddr_section_number)) parser.set('interface_{0}'.format(hwaddr_section_number), 'IPv4', '{0}/{1}/{2}'. format(ip_address, netmask, gateway)) parser.set('interface_{0}'.format(hwaddr_section_number), 'Nameservers', dns_servers) parser.set('interface_{0}'.format(hwaddr_section_number), 'Name', name) parser.set('interface_{0}'.format(hwaddr_section_number), 'MAC', hwaddr) parser.set('interface_{0}'.format(hwaddr_section_number), 'Type', 'ethernet') with salt.utils.files.fopen(INTERFACES_CONFIG, 'w') as config_file: parser.write(config_file) return True def set_static_all(interface, address, netmask, gateway, nameservers=None): ''' Configure specified adapter to use ipv4 manual settings Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :param str address: ipv4 address :param str netmask: ipv4 netmask :param str gateway: ipv4 gateway :param str nameservers: list of nameservers servers separated by spaces (Optional) :return: True if the settings were applied, otherwise an exception will be thrown. 
:rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_static_all interface-label address netmask gateway nameservers ''' validate, msg = _validate_ipv4([address, netmask, gateway]) if not validate: raise salt.exceptions.CommandExecutionError(msg) if nameservers: validate, msg = _space_delimited_list(nameservers) if not validate: raise salt.exceptions.CommandExecutionError(msg) if not isinstance(nameservers, list): nameservers = nameservers.split(' ') if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '0') _save_config(interface, 'IP_Address', address) _save_config(interface, 'Subnet_Mask', netmask) _save_config(interface, 'Gateway', gateway) if nameservers: _save_config(interface, 'DNS_Address', nameservers[0]) if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: if interface in pyiface.getIfaces(): return _configure_static_interface(interface, **{'ip': address, 'dns': ','.join(nameservers) if nameservers else '', 'netmask': netmask, 'gateway': gateway}) raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('manual', variant_level=1) ipv4['Address'] = dbus.String('{0}'.format(address), variant_level=1) ipv4['Netmask'] = dbus.String('{0}'.format(netmask), variant_level=1) ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) if nameservers: service.set_property('Nameservers.Configuration', [dbus.String('{0}'.format(d)) for d in nameservers]) except Exception as exc: exc_msg = 'Couldn\'t set manual settings for 
service: {0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def get_interface(iface): ''' Returns details about given interface. CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0 ''' _interfaces = get_interfaces_details() for _interface in _interfaces['interfaces']: if _interface['connectionid'] == iface: return _dict_to_string(_interface) return None def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if iface_type != 'eth': raise salt.exceptions.CommandExecutionError('Interface type not supported: {0}:'.format(iface_type)) if 'proto' not in settings or settings['proto'] == 'dhcp': # default protocol type used is dhcp set_dhcp_linklocal_all(iface) elif settings['proto'] != 'static': exc_msg = 'Protocol type: {0} is not supported'.format(settings['proto']) raise salt.exceptions.CommandExecutionError(exc_msg) else: address = settings['ipaddr'] netmask = settings['netmask'] gateway = settings['gateway'] dns = [] for key, val in six.iteritems(settings): if 'dns' in key or 'domain' in key: dns += val set_static_all(iface, address, netmask, gateway, dns) if enabled: up(iface) return get_interface(iface) def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') changes = [] if 'networking' in settings: if settings['networking'] in _CONFIG_TRUE: __salt__['service.enable']('connman') else: __salt__['service.disable']('connman') if 'hostname' in settings: new_hostname = settings['hostname'].split('.', 1)[0] settings['hostname'] = new_hostname old_hostname = __salt__['network.get_hostname'] if new_hostname != old_hostname: __salt__['network.mod_hostname'](new_hostname) changes.append('hostname={0}'.format(new_hostname)) return changes def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' ) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: stop = __salt__['service.stop']('connman') time.sleep(2) res = stop and __salt__['service.start']('connman') return hostname_res and res
saltstack/salt
salt/modules/nilrt_ip.py
apply_network_settings
python
def apply_network_settings(**settings):
    '''
    Apply global network configuration.

    Restarts the connman service so pending configuration takes effect,
    unless a reboot is required instead, and optionally applies a new
    hostname first.

    :param settings: keyword settings; recognized keys are
        ``require_reboot``, ``apply_hostname`` and ``hostname``
    :return: True if both the hostname change (when requested) and the
        network restart succeeded
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' ip.apply_network_settings
    '''
    if __grains__['lsb_distrib_id'] == 'nilrt':
        raise salt.exceptions.CommandExecutionError('Not supported in this version.')

    # Idiomatic defaulting instead of membership-check-then-assign.
    settings.setdefault('require_reboot', False)
    settings.setdefault('apply_hostname', False)

    hostname_res = True
    if settings['apply_hostname'] in _CONFIG_TRUE:
        if 'hostname' in settings:
            hostname_res = __salt__['network.mod_hostname'](settings['hostname'])
        else:
            log.warning(
                'The network state sls is trying to apply hostname '
                'changes but no hostname is defined.'
            )
            hostname_res = False

    res = True
    if settings['require_reboot'] in _CONFIG_TRUE:
        # Leave the network untouched; the caller is expected to reboot
        # for the configuration to apply.  (res stays True — the original
        # redundantly reassigned it here.)
        log.warning(
            'The network state sls is requiring a reboot of the system to '
            'properly apply network configuration.'
        )
    else:
        # Bounce connman so the new configuration takes effect.
        stop = __salt__['service.stop']('connman')
        time.sleep(2)
        res = stop and __salt__['service.start']('connman')
    return hostname_res and res
Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nilrt_ip.py#L1001-L1042
null
# -*- coding: utf-8 -*- ''' The networking module for NI Linux Real-Time distro ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time import os import re # Import salt libs import salt.exceptions import salt.utils.files import salt.utils.validate.net # Import 3rd-party libs # pylint: disable=import-error,redefined-builtin,no-name-in-module from salt.ext.six.moves import map, range, configparser from salt.ext import six # pylint: enable=import-error,redefined-builtin,no-name-in-module try: import pyconnman except ImportError: pyconnman = None try: import dbus except ImportError: dbus = None try: import pyiface from pyiface.ifreqioctls import IFF_LOOPBACK, IFF_RUNNING except ImportError: pyiface = None try: from requests.structures import CaseInsensitiveDict except ImportError: CaseInsensitiveDict = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'ip' SERVICE_PATH = '/net/connman/service/' INTERFACES_CONFIG = '/var/lib/connman/interfaces.config' NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg' INI_FILE = '/etc/natinst/share/ni-rt.ini' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] NIRTCFG_ETHERCAT = 'EtherCAT' def _assume_condition(condition, err): ''' Raise an exception if the condition is false ''' if not condition: raise RuntimeError(err) def __virtual__(): ''' Confine this module to NI Linux Real-Time based distros ''' try: msg = 'The nilrt_ip module could not be loaded: unsupported OS family' _assume_condition(__grains__['os_family'] == 'NILinuxRT', msg) _assume_condition(CaseInsensitiveDict, 'The python package request is not installed') _assume_condition(pyiface, 'The python pyiface package is not installed') if __grains__['lsb_distrib_id'] != 'nilrt': _assume_condition(pyconnman, 'The python package pyconnman is not installed') _assume_condition(dbus, 'The python DBus package is not installed') _assume_condition(_get_state() != 'offline', 
'Connman is not running') except RuntimeError as exc: return False, str(exc) return __virtualname__ def _get_state(): ''' Returns the state of connman ''' try: return pyconnman.ConnManager().get_property('State') except KeyError: return 'offline' except dbus.DBusException as exc: raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc)) def _get_technologies(): ''' Returns the technologies of connman ''' tech = '' technologies = pyconnman.ConnManager().get_technologies() for path, params in technologies: tech += '{0}\n\tName = {1}\n\tType = {2}\n\tPowered = {3}\n\tConnected = {4}\n'.format( path, params['Name'], params['Type'], params['Powered'] == 1, params['Connected'] == 1) return tech def _get_services(): ''' Returns a list with all connman services ''' serv = [] services = pyconnman.ConnManager().get_services() for path, _ in services: serv.append(six.text_type(path[len(SERVICE_PATH):])) return serv def _connected(service): ''' Verify if a connman service is connected ''' state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State') return state == 'online' or state == 'ready' def _space_delimited_list(value): ''' validate that a value contains one or more space-delimited values ''' if isinstance(value, six.string_types): items = value.split(' ') valid = items and all(items) else: valid = hasattr(value, '__iter__') and (value != []) if valid: return True, 'space-delimited string' return False, '{0} is not a valid list.\n'.format(value) def _validate_ipv4(value): ''' validate ipv4 values ''' if len(value) == 3: if not salt.utils.validate.net.ipv4_addr(value[0].strip()): return False, 'Invalid ip address: {0} for ipv4 option'.format(value[0]) if not salt.utils.validate.net.netmask(value[1].strip()): return False, 'Invalid netmask: {0} for ipv4 option'.format(value[1]) if not salt.utils.validate.net.ipv4_addr(value[2].strip()): return False, 'Invalid gateway: {0} for ipv4 option'.format(value[2]) else: return 
False, 'Invalid value: {0} for ipv4 option'.format(value) return True, '' def _interface_to_service(iface): ''' returns the coresponding service to given interface if exists, otherwise return None ''' for _service in _get_services(): service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service)) if service_info.get_property('Ethernet')['Interface'] == iface: return _service return None def _get_service_info(service): ''' return details about given connman service ''' service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) data = { 'label': service, 'wireless': service_info.get_property('Type') == 'wifi', 'connectionid': six.text_type(service_info.get_property('Ethernet')['Interface']), 'hwaddr': six.text_type(service_info.get_property('Ethernet')['Address']) } state = service_info.get_property('State') if state == 'ready' or state == 'online': data['up'] = True data['ipv4'] = { 'gateway': '0.0.0.0' } ipv4 = 'IPv4' if service_info.get_property('IPv4')['Method'] == 'manual': ipv4 += '.Configuration' ipv4_info = service_info.get_property(ipv4) for info in ['Method', 'Address', 'Netmask', 'Gateway']: value = ipv4_info.get(info) if value is None: log.warning('Unable to get IPv4 %s for service %s\n', info, service) continue if info == 'Method': info = 'requestmode' if value == 'dhcp': value = 'dhcp_linklocal' elif value in ('manual', 'fixed'): value = 'static' data['ipv4'][info.lower()] = six.text_type(value) ipv6_info = service_info.get_property('IPv6') for info in ['Address', 'Prefix', 'Gateway']: value = ipv6_info.get(info) if value is None: log.warning('Unable to get IPv6 %s for service %s\n', info, service) continue if 'ipv6' not in data: data['ipv6'] = {} data['ipv6'][info.lower()] = [six.text_type(value)] nameservers = [] for nameserver_prop in service_info.get_property('Nameservers'): nameservers.append(six.text_type(nameserver_prop)) data['ipv4']['dns'] = nameservers else: data['up'] = False data['ipv4'] = { 'requestmode': 
'disabled' } data['ipv4']['supportedrequestmodes'] = [ 'static', 'dhcp_linklocal', 'disabled' ] return data def _get_dns_info(): ''' return dns list ''' dns_list = [] try: with salt.utils.files.fopen('/etc/resolv.conf', 'r+') as dns_info: lines = dns_info.readlines() for line in lines: if 'nameserver' in line: dns = line.split()[1].strip() if dns not in dns_list: dns_list.append(dns) except IOError: log.warning('Could not get domain\n') return dns_list def _remove_quotes(value): ''' Remove leading and trailing double quotes if they exist. ''' # nirtcfg writes values with quotes if len(value) > 1 and value[0] == value[-1] == '\"': value = value[1:-1] return value def _load_config(section, options, default_value='', filename=INI_FILE): ''' Get values for some options and a given section from a config file. :param section: Section Name :param options: List of options :param default_value: Default value if an option doesn't have a value. Default is empty string. :param filename: config file. Default is INI_FILE. 
:return: ''' results = {} if not options: return results with salt.utils.files.fopen(filename, 'r') as config_file: config_parser = configparser.RawConfigParser(dict_type=CaseInsensitiveDict) config_parser.readfp(config_file) for option in options: results[option] = _remove_quotes(config_parser.get(section, option)) \ if config_parser.has_option(section, option) else default_value return results def _get_request_mode_info(interface): ''' return requestmode for given interface ''' settings = _load_config(interface, ['linklocalenabled', 'dhcpenabled'], -1) link_local_enabled = int(settings['linklocalenabled']) dhcp_enabled = int(settings['dhcpenabled']) if dhcp_enabled == 1: return 'dhcp_linklocal' if link_local_enabled == 1 else 'dhcp_only' else: if link_local_enabled == 1: return 'linklocal_only' if link_local_enabled == 0: return 'static' # some versions of nirtcfg don't set the dhcpenabled/linklocalenabled variables # when selecting "DHCP or Link Local" from MAX, so return it by default to avoid # having the requestmode "None" because none of the conditions above matched. return 'dhcp_linklocal' def _get_adapter_mode_info(interface): ''' return adaptermode for given interface ''' mode = _load_config(interface, ['mode'])['mode'].lower() return mode if mode in ['disabled', 'ethercat'] else 'tcpip' def _get_possible_adapter_modes(interface, blacklist): ''' Return possible adapter modes for a given interface using a blacklist. 
:param interface: interface name :param blacklist: given blacklist :return: list of possible adapter modes ''' adapter_modes = [] protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower() sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface)) with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file: uevent_lines = uevent_file.readlines() uevent_devtype = "" for line in uevent_lines: if line.startswith("DEVTYPE="): uevent_devtype = line.split('=')[1].strip() break for adapter_mode in blacklist: if adapter_mode == '_': continue value = blacklist.get(adapter_mode, {}) if value.get('additional_protocol') and adapter_mode not in protocols: continue if interface not in value['name'] \ and not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path) or (blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype) for iface_type in value['type']): adapter_modes += [adapter_mode] return adapter_modes def _get_static_info(interface): ''' Return information about an interface from config file. 
:param interface: interface label ''' data = { 'connectionid': interface.name, 'label': interface.name, 'hwaddr': interface.hwaddr[:-1], 'up': False, 'ipv4': { 'supportedrequestmodes': ['static', 'dhcp_linklocal', 'disabled'], 'requestmode': 'static' }, 'wireless': False } hwaddr_section_number = ''.join(data['hwaddr'].split(':')) if os.path.exists(INTERFACES_CONFIG): information = _load_config(hwaddr_section_number, ['IPv4', 'Nameservers'], filename=INTERFACES_CONFIG) if information['IPv4'] != '': ipv4_information = information['IPv4'].split('/') data['ipv4']['address'] = ipv4_information[0] data['ipv4']['dns'] = information['Nameservers'].split(',') data['ipv4']['netmask'] = ipv4_information[1] data['ipv4']['gateway'] = ipv4_information[2] return data def _get_base_interface_info(interface): ''' return base details about given interface ''' blacklist = { 'tcpip': { 'name': [], 'type': [], 'additional_protocol': False }, 'disabled': { 'name': ['eth0'], 'type': ['gadget'], 'additional_protocol': False }, 'ethercat': { 'name': ['eth0'], 'type': ['gadget', 'usb', 'wlan'], 'additional_protocol': True }, '_': { 'usb': 'sys', 'gadget': 'uevent', 'wlan': 'uevent' } } return { 'label': interface.name, 'connectionid': interface.name, 'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist), 'adapter_mode': _get_adapter_mode_info(interface.name), 'up': interface.flags & IFF_RUNNING != 0, 'ipv4': { 'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'], 'requestmode': _get_request_mode_info(interface.name) }, 'hwaddr': interface.hwaddr[:-1] } def _get_ethercat_interface_info(interface): ''' return details about given ethercat interface ''' base_information = _get_base_interface_info(interface) base_information['ethercat'] = { 'masterid': _load_config(interface.name, ['MasterID'])['MasterID'] } return base_information def _get_tcpip_interface_info(interface): ''' return details about given tcpip interface ''' 
base_information = _get_base_interface_info(interface) if base_information['ipv4']['requestmode'] == 'static': settings = _load_config(interface.name, ['IP_Address', 'Subnet_Mask', 'Gateway', 'DNS_Address']) base_information['ipv4']['address'] = settings['IP_Address'] base_information['ipv4']['netmask'] = settings['Subnet_Mask'] base_information['ipv4']['gateway'] = settings['Gateway'] base_information['ipv4']['dns'] = [settings['DNS_Address']] elif base_information['up']: base_information['ipv4']['address'] = interface.sockaddrToStr(interface.addr) base_information['ipv4']['netmask'] = interface.sockaddrToStr(interface.netmask) base_information['ipv4']['gateway'] = '0.0.0.0' base_information['ipv4']['dns'] = _get_dns_info() with salt.utils.files.fopen('/proc/net/route', 'r') as route_file: pattern = re.compile(r'^{interface}\t[0]{{8}}\t([0-9A-Z]{{8}})'.format(interface=interface.name), re.MULTILINE) match = pattern.search(route_file.read()) iface_gateway_hex = None if not match else match.group(1) if iface_gateway_hex is not None and len(iface_gateway_hex) == 8: base_information['ipv4']['gateway'] = '.'.join([str(int(iface_gateway_hex[i:i + 2], 16)) for i in range(6, -1, -2)]) return base_information def _get_interface_info(interface): ''' return details about given interface ''' adapter_mode = _get_adapter_mode_info(interface.name) if adapter_mode == 'disabled': return _get_base_interface_info(interface) elif adapter_mode == 'ethercat': return _get_ethercat_interface_info(interface) return _get_tcpip_interface_info(interface) def _dict_to_string(dictionary): ''' converts a dictionary object into a list of strings ''' ret = '' for key, val in sorted(dictionary.items()): if isinstance(val, dict): for line in _dict_to_string(val): ret += six.text_type(key) + '-' + line + '\n' elif isinstance(val, list): text = ' '.join([six.text_type(item) for item in val]) ret += six.text_type(key) + ': ' + text + '\n' else: ret += six.text_type(key) + ': ' + six.text_type(val) + 
'\n' return ret.splitlines() def _get_info(interface): ''' Return information about an interface if it's associated with a service. :param interface: interface label ''' service = _interface_to_service(interface.name) return _get_service_info(service) def get_interfaces_details(): ''' Get details about all the interfaces on the minion :return: information about all interfaces omitting loopback :rtype: dictionary CLI Example: .. code-block:: bash salt '*' ip.get_interfaces_details ''' _interfaces = [interface for interface in pyiface.getIfaces() if interface.flags & IFF_LOOPBACK == 0] if __grains__['lsb_distrib_id'] == 'nilrt': return {'interfaces': list(map(_get_interface_info, _interfaces))} # filter just the services _interfaces = [interface for interface in _interfaces if _interface_to_service(interface.name) is not None] return {'interfaces': list(map(_get_info, _interfaces))} def _change_state_legacy(interface, new_state): ''' Enable or disable an interface on a legacy distro Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP' if new_state == 'up' else 'Disabled') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: out = __salt__['cmd.run_all']('ip link set {0} {1}'.format(interface, new_state)) if out['retcode'] != 0: msg = 'Couldn\'t {0} interface {1}. Error: {2}'.format('enable' if new_state == 'up' else 'disable', interface, out['stderr']) raise salt.exceptions.CommandExecutionError(msg) return True def _change_state(interface, new_state): ''' Enable or disable an interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. 
:param interface: interface label :param new_state: up or down :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool ''' if __grains__['lsb_distrib_id'] == 'nilrt': return _change_state_legacy(interface, new_state) service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) connected = _connected(service) if (not connected and new_state == 'up') or (connected and new_state == 'down'): service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) try: state = service.connect() if new_state == 'up' else service.disconnect() return state is None except Exception: raise salt.exceptions.CommandExecutionError('Couldn\'t {0} service: {1}\n' .format('enable' if new_state == 'up' else 'disable', service)) return True def up(interface, iface_type=None): # pylint: disable=invalid-name,unused-argument ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.up interface-label ''' return _change_state(interface, 'up') def enable(interface): ''' Enable the specified interface Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was enabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.enable interface-label ''' return up(interface) def down(interface, iface_type=None): # pylint: disable=unused-argument ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. 
:param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.down interface-label ''' return _change_state(interface, 'down') def disable(interface): ''' Disable the specified interface Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the service was disabled, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.disable interface-label ''' return down(interface) def _save_config(section, token, value): ''' Helper function to persist a configuration in the ini file ''' cmd = NIRTCFG_PATH cmd += ' --set section={0},token=\'{1}\',value=\'{2}\''.format(section, token, value) if __salt__['cmd.run_all'](cmd)['retcode'] != 0: exc_msg = 'Error: could not set {} to {} for {}\n'.format(token, value, section) raise salt.exceptions.CommandExecutionError(exc_msg) def set_ethercat(interface, master_id): ''' Configure specified adapter to use EtherCAT adapter mode. If successful, the target will need reboot if it doesn't already use EtherCAT adapter mode, otherwise will return true. :param interface: interface label :param master_id: EtherCAT Master ID :return: True if the settings were applied, otherwise an exception will be thrown. CLI Example: .. 
code-block:: bash salt '*' ip.set_ethercat interface-label master-id ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', NIRTCFG_ETHERCAT) _save_config(interface, 'MasterID', master_id) if initial_mode != 'ethercat': __salt__['system.set_reboot_required_witnessed']() return True raise salt.exceptions.CommandExecutionError('EtherCAT is not supported') def _restart(interface): ''' Disable and enable an interface ''' disable(interface) enable(interface) def set_dhcp_linklocal_all(interface): ''' Configure specified adapter to use DHCP with linklocal fallback Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_dhcp_linklocal_all interface-label ''' if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('dhcp', variant_level=1) ipv4['Address'] = dbus.String('', variant_level=1) ipv4['Netmask'] = dbus.String('', variant_level=1) ipv4['Gateway'] = dbus.String('', variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) service.set_property('Nameservers.Configuration', ['']) # reset nameservers list except Exception as exc: exc_msg = 'Couldn\'t set dhcp linklocal for service: 
{0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def set_dhcp_only_all(interface): ''' Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.dhcp_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '0') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def set_linklocal_only_all(interface): ''' Configure specified adapter to use linklocal only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.linklocal_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '1') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True def _configure_static_interface(interface, **settings): ''' Configure an interface that is not detected as a service by Connman (i.e. 
link is down) :param interface: interface label :param settings: - ip - netmask - gateway - dns - name :return: True if settings were applied successfully. :rtype: bool ''' interface = pyiface.Interface(name=interface) parser = configparser.ConfigParser() if os.path.exists(INTERFACES_CONFIG): try: with salt.utils.files.fopen(INTERFACES_CONFIG, 'r') as config_file: parser.readfp(config_file) except configparser.MissingSectionHeaderError: pass hwaddr = interface.hwaddr[:-1] hwaddr_section_number = ''.join(hwaddr.split(':')) if not parser.has_section('interface_{0}'.format(hwaddr_section_number)): parser.add_section('interface_{0}'.format(hwaddr_section_number)) ip_address = settings.get('ip', '0.0.0.0') netmask = settings.get('netmask', '0.0.0.0') gateway = settings.get('gateway', '0.0.0.0') dns_servers = settings.get('dns', '') name = settings.get('name', 'ethernet_cable_{0}'.format(hwaddr_section_number)) parser.set('interface_{0}'.format(hwaddr_section_number), 'IPv4', '{0}/{1}/{2}'. format(ip_address, netmask, gateway)) parser.set('interface_{0}'.format(hwaddr_section_number), 'Nameservers', dns_servers) parser.set('interface_{0}'.format(hwaddr_section_number), 'Name', name) parser.set('interface_{0}'.format(hwaddr_section_number), 'MAC', hwaddr) parser.set('interface_{0}'.format(hwaddr_section_number), 'Type', 'ethernet') with salt.utils.files.fopen(INTERFACES_CONFIG, 'w') as config_file: parser.write(config_file) return True def set_static_all(interface, address, netmask, gateway, nameservers=None): ''' Configure specified adapter to use ipv4 manual settings Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :param str address: ipv4 address :param str netmask: ipv4 netmask :param str gateway: ipv4 gateway :param str nameservers: list of nameservers servers separated by spaces (Optional) :return: True if the settings were applied, otherwise an exception will be thrown. 
:rtype: bool CLI Example: .. code-block:: bash salt '*' ip.set_static_all interface-label address netmask gateway nameservers ''' validate, msg = _validate_ipv4([address, netmask, gateway]) if not validate: raise salt.exceptions.CommandExecutionError(msg) if nameservers: validate, msg = _space_delimited_list(nameservers) if not validate: raise salt.exceptions.CommandExecutionError(msg) if not isinstance(nameservers, list): nameservers = nameservers.split(' ') if __grains__['lsb_distrib_id'] == 'nilrt': initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '0') _save_config(interface, 'linklocalenabled', '0') _save_config(interface, 'IP_Address', address) _save_config(interface, 'Subnet_Mask', netmask) _save_config(interface, 'Gateway', gateway) if nameservers: _save_config(interface, 'DNS_Address', nameservers[0]) if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True service = _interface_to_service(interface) if not service: if interface in pyiface.getIfaces(): return _configure_static_interface(interface, **{'ip': address, 'dns': ','.join(nameservers) if nameservers else '', 'netmask': netmask, 'gateway': gateway}) raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('manual', variant_level=1) ipv4['Address'] = dbus.String('{0}'.format(address), variant_level=1) ipv4['Netmask'] = dbus.String('{0}'.format(netmask), variant_level=1) ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) if nameservers: service.set_property('Nameservers.Configuration', [dbus.String('{0}'.format(d)) for d in nameservers]) except Exception as exc: exc_msg = 'Couldn\'t set manual settings for 
service: {0}\nError: {1}\n'.format(service, exc) raise salt.exceptions.CommandExecutionError(exc_msg) return True def get_interface(iface): ''' Returns details about given interface. CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0 ''' _interfaces = get_interfaces_details() for _interface in _interfaces['interfaces']: if _interface['connectionid'] == iface: return _dict_to_string(_interface) return None def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') if iface_type != 'eth': raise salt.exceptions.CommandExecutionError('Interface type not supported: {0}:'.format(iface_type)) if 'proto' not in settings or settings['proto'] == 'dhcp': # default protocol type used is dhcp set_dhcp_linklocal_all(iface) elif settings['proto'] != 'static': exc_msg = 'Protocol type: {0} is not supported'.format(settings['proto']) raise salt.exceptions.CommandExecutionError(exc_msg) else: address = settings['ipaddr'] netmask = settings['netmask'] gateway = settings['gateway'] dns = [] for key, val in six.iteritems(settings): if 'dns' in key or 'domain' in key: dns += val set_static_all(iface, address, netmask, gateway, dns) if enabled: up(iface) return get_interface(iface) def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') changes = [] if 'networking' in settings: if settings['networking'] in _CONFIG_TRUE: __salt__['service.enable']('connman') else: __salt__['service.disable']('connman') if 'hostname' in settings: new_hostname = settings['hostname'].split('.', 1)[0] settings['hostname'] = new_hostname old_hostname = __salt__['network.get_hostname'] if new_hostname != old_hostname: __salt__['network.mod_hostname'](new_hostname) changes.append('hostname={0}'.format(new_hostname)) return changes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' if __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version.') settings = [] networking = 'no' if _get_state() == 'offline' else 'yes' settings.append('networking={0}'.format(networking)) hostname = __salt__['network.get_hostname'] settings.append('hostname={0}'.format(hostname)) return settings
saltstack/salt
salt/utils/event.py
get_event
python
def get_event( node, sock_dir=None, transport='zeromq', opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' Return an event object suitable for the named transport :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. ''' sock_dir = sock_dir or opts['sock_dir'] # TODO: AIO core is separate from transport if node == 'master': return MasterEvent(sock_dir, opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) return SaltEvent(node, sock_dir, opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors)
Return an event object suitable for the named transport :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L119-L145
null
# -*- coding: utf-8 -*- ''' Manage events Events are all fired off via a zeromq 'pub' socket, and listened to with local zeromq 'sub' sockets All of the formatting is self contained in the event module, so we should be able to modify the structure in the future since the same module used to read events is the same module used to fire off events. Old style event messages were comprised of two parts delimited at the 20 char point. The first 20 characters are used for the zeromq subscriber to match publications and 20 characters was chosen because it was at the time a few more characters than the length of a jid (Job ID). Any tags of length less than 20 characters were padded with "|" chars out to 20 characters. Although not explicit, the data for an event comprised a python dict that was serialized by msgpack. New style event messages support event tags longer than 20 characters while still being backwards compatible with old style tags. The longer tags better enable name spaced event tags which tend to be longer. Moreover, the constraint that the event data be a python dict is now an explicit constraint and fire-event will now raise a ValueError if not. Tags must be ascii safe strings, that is, have values less than 0x80 Since the msgpack dict (map) indicators have values greater than or equal to 0x80 it can be unambiguously determined if the start of data is at char 21 or not. In the new style, when the tag is longer than 20 characters, an end of tag string is appended to the tag given by the string constant TAGEND, that is, two line feeds '\n\n'. When the tag is less than 20 characters then the tag is padded with pipes "|" out to 20 characters as before. When the tag is exactly 20 characters no padded is done. The get_event method intelligently figures out if the tag is longer than 20 characters. The convention for namespacing is to use dot characters "." as the name space delimiter. The name space "salt" is reserved by SaltStack for internal events. 
For example: Namespaced tag 'salt.runner.manage.status.start' ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import os import time import fnmatch import hashlib import logging import datetime import sys try: from collections.abc import MutableMapping except ImportError: from collections import MutableMapping from multiprocessing.util import Finalize from salt.ext.six.moves import range # Import third party libs from salt.ext import six import tornado.ioloop import tornado.iostream # Import salt libs import salt.config import salt.payload import salt.utils.asynchronous import salt.utils.cache import salt.utils.dicttrim import salt.utils.files import salt.utils.platform import salt.utils.process import salt.utils.stringutils import salt.utils.zeromq import salt.log.setup import salt.defaults.exitcodes import salt.transport.ipc import salt.transport.client log = logging.getLogger(__name__) # The SUB_EVENT set is for functions that require events fired based on # component executions, like the state system SUB_EVENT = ('state.highstate', 'state.sls') TAGEND = str('\n\n') # long tag delimiter TAGPARTER = str('/') # name spaced tag delimiter SALT = 'salt' # base prefix for all salt/ events # dict map of namespaced base tag prefixes for salt events TAGS = { 'auth': 'auth', # prefix for all salt/auth events 'job': 'job', # prefix for all salt/job events (minion jobs) 'key': 'key', # prefix for all salt/key events 'minion': 'minion', # prefix for all salt/minion events # (minion sourced events) 'syndic': 'syndic', # prefix for all salt/syndic events # (syndic minion sourced events) 'run': 'run', # prefix for all salt/run events (salt runners) 'wheel': 'wheel', # prefix for all salt/wheel events 'cloud': 'cloud', # prefix for all salt/cloud events 'fileserver': 'fileserver', # prefix for all salt/fileserver events 'queue': 'queue', # prefix for all salt/queue events } def get_master_event(opts, sock_dir, listen=True, 
io_loop=None, raise_errors=False, keep_loop=False): ''' Return an event object suitable for the named transport ''' # TODO: AIO core is separate from transport if opts['transport'] in ('zeromq', 'tcp', 'detect'): return MasterEvent(sock_dir, opts, listen=listen, io_loop=io_loop, raise_errors=raise_errors, keep_loop=keep_loop) def fire_args(opts, jid, tag_data, prefix=''): ''' Fire an event containing the arguments passed to an orchestration job ''' try: tag_suffix = [jid, 'args'] except NameError: pass else: tag = tagify(tag_suffix, prefix) try: _event = get_master_event(opts, opts['sock_dir'], listen=False) _event.fire_event(tag_data, tag=tag) except Exception as exc: # Don't let a problem here hold up the rest of the orchestration log.warning( 'Failed to fire args event %s with data %s: %s', tag, tag_data, exc ) def tagify(suffix='', prefix='', base=SALT): ''' convenience function to build a namespaced event tag string from joining with the TABPART character the base, prefix and suffix If string prefix is a valid key in TAGS Then use the value of key prefix Else use prefix string If suffix is a list Then join all string elements of suffix individually Else use string suffix ''' parts = [base, TAGS.get(prefix, prefix)] if hasattr(suffix, 'append'): # list so extend parts parts.extend(suffix) else: # string so append parts.append(suffix) for index, _ in enumerate(parts): try: parts[index] = salt.utils.stringutils.to_str(parts[index]) except TypeError: parts[index] = str(parts[index]) return TAGPARTER.join([part for part in parts if part]) def update_stats(stats, start_time, data): ''' Calculate the master stats and return the updated stat info ''' end_time = time.time() cmd = data['cmd'] # the jid is used as the create time try: jid = data['jid'] except KeyError: try: jid = data['data']['__pub_jid'] except KeyError: log.info('jid not found in data, stats not updated') return stats create_time = int(time.mktime(time.strptime(jid, '%Y%m%d%H%M%S%f'))) latency = 
start_time - create_time duration = end_time - start_time stats[cmd]['runs'] += 1 stats[cmd]['latency'] = (stats[cmd]['latency'] * (stats[cmd]['runs'] - 1) + latency) / stats[cmd]['runs'] stats[cmd]['mean'] = (stats[cmd]['mean'] * (stats[cmd]['runs'] - 1) + duration) / stats[cmd]['runs'] return stats class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. 
If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. 
In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], 
tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. 
wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. 
- 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. 
''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event 
arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass class MasterEvent(SaltEvent): ''' Warning! Use the get_event function or the code will not be RAET compatible Create a master event management object ''' def __init__( self, sock_dir, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): super(MasterEvent, self).__init__( 'master', sock_dir, opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) class LocalClientEvent(MasterEvent): ''' Warning! Use the get_event function or the code will not be RAET compatible This class is just used to differentiate who is handling the events, specially on logs, but it's the same as MasterEvent. ''' class NamespacedEvent(object): ''' A wrapper for sending events within a specific base namespace ''' def __init__(self, event, base, print_func=None): self.event = event self.base = base self.print_func = print_func def fire_event(self, data, tag): self.event.fire_event(data, tagify(tag, base=self.base)) if self.print_func is not None: self.print_func(tag, data) class MinionEvent(SaltEvent): ''' Warning! 
Use the get_event function or the code will not be RAET compatible Create a master event management object ''' def __init__(self, opts, listen=True, io_loop=None, keep_loop=False, raise_errors=False): super(MinionEvent, self).__init__( 'minion', sock_dir=opts.get('sock_dir'), opts=opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) class AsyncEventPublisher(object): ''' An event publisher class intended to run in an ioloop (within a single process) TODO: remove references to "minion_event" whenever we need to use this for other things ''' def __init__(self, opts, io_loop=None): self.opts = salt.config.DEFAULT_MINION_OPTS.copy() default_minion_sock_dir = self.opts['sock_dir'] self.opts.update(opts) self.io_loop = io_loop or tornado.ioloop.IOLoop.current() self._closing = False hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] epub_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pub.ipc'.format(id_hash) ) if os.path.exists(epub_sock_path): os.unlink(epub_sock_path) epull_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pull.ipc'.format(id_hash) ) if os.path.exists(epull_sock_path): os.unlink(epull_sock_path) if self.opts['ipc_mode'] == 'tcp': epub_uri = int(self.opts['tcp_pub_port']) epull_uri = int(self.opts['tcp_pull_port']) else: epub_uri = epub_sock_path epull_uri = epull_sock_path log.debug('%s PUB socket URI: %s', self.__class__.__name__, epub_uri) log.debug('%s PULL socket URI: %s', self.__class__.__name__, epull_uri) minion_sock_dir = self.opts['sock_dir'] if not os.path.isdir(minion_sock_dir): # Let's try to create the directory defined on the configuration # file try: os.makedirs(minion_sock_dir, 0o755) except OSError as exc: log.error('Could not create SOCK_DIR: %s', exc) # Let's not fail yet and try 
using the default path if minion_sock_dir == default_minion_sock_dir: # We're already trying the default system path, stop now! raise if not os.path.isdir(default_minion_sock_dir): try: os.makedirs(default_minion_sock_dir, 0o755) except OSError as exc: log.error('Could not create SOCK_DIR: %s', exc) # Let's stop at this stage raise self.publisher = salt.transport.ipc.IPCMessagePublisher( self.opts, epub_uri, io_loop=self.io_loop ) self.puller = salt.transport.ipc.IPCMessageServer( self.opts, epull_uri, io_loop=self.io_loop, payload_handler=self.handle_publish ) log.info('Starting pull socket on %s', epull_uri) with salt.utils.files.set_umask(0o177): self.publisher.start() self.puller.start() def handle_publish(self, package, _): ''' Get something from epull, publish it out epub, and return the package (or None) ''' try: self.publisher.publish(package) return package # Add an extra fallback in case a forked process leeks through except Exception: log.critical('Unexpected error while polling minion events', exc_info=True) return None def close(self): if self._closing: return self._closing = True if hasattr(self, 'publisher'): self.publisher.close() if hasattr(self, 'puller'): self.puller.close() def __del__(self): self.close() class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' The interface that takes master events and republishes them out to anyone who wants to listen ''' def __init__(self, opts, **kwargs): super(EventPublisher, self).__init__(**kwargs) self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() self.opts.update(opts) self._closing = False # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. 
def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'opts': self.opts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def run(self): ''' Bind the pub and pull sockets for events ''' salt.utils.process.appendproctitle(self.__class__.__name__) self.io_loop = tornado.ioloop.IOLoop() with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.opts['ipc_mode'] == 'tcp': epub_uri = int(self.opts['tcp_master_pub_port']) epull_uri = int(self.opts['tcp_master_pull_port']) else: epub_uri = os.path.join( self.opts['sock_dir'], 'master_event_pub.ipc' ) epull_uri = os.path.join( self.opts['sock_dir'], 'master_event_pull.ipc' ) self.publisher = salt.transport.ipc.IPCMessagePublisher( self.opts, epub_uri, io_loop=self.io_loop ) self.puller = salt.transport.ipc.IPCMessageServer( self.opts, epull_uri, io_loop=self.io_loop, payload_handler=self.handle_publish, ) # Start the master event publisher with salt.utils.files.set_umask(0o177): self.publisher.start() self.puller.start() if (self.opts['ipc_mode'] != 'tcp' and ( self.opts['publisher_acl'] or self.opts['external_auth'])): os.chmod(os.path.join( self.opts['sock_dir'], 'master_event_pub.ipc'), 0o666) # Make sure the IO loop and respective sockets are closed and # destroyed Finalize(self, self.close, exitpriority=15) self.io_loop.start() def handle_publish(self, package, _): ''' Get something from epull, publish it out epub, and return the package (or None) ''' try: self.publisher.publish(package) return package # Add an extra fallback in case a forked process leeks through except Exception: log.critical('Unexpected error while polling master events', exc_info=True) return None def close(self): if self._closing: return self._closing = True if hasattr(self, 'publisher'): self.publisher.close() if hasattr(self, 'puller'): self.puller.close() if hasattr(self, 
'io_loop'): self.io_loop.close() def _handle_signals(self, signum, sigframe): self.close() super(EventPublisher, self)._handle_signals(signum, sigframe) def __del__(self): self.close() class EventReturn(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' A dedicated process which listens to the master event bus and queues and forwards events to the specified returner. ''' def __new__(cls, *args, **kwargs): if sys.platform.startswith('win'): # This is required for Windows. On Linux, when a process is # forked, the module namespace is copied and the current process # gets all of sys.modules from where the fork happens. This is not # the case for Windows. import salt.minion # pylint: disable=unused-import instance = super(EventReturn, cls).__new__(cls, *args, **kwargs) return instance def __init__(self, opts, **kwargs): ''' Initialize the EventReturn system Return an EventReturn instance ''' super(EventReturn, self).__init__(**kwargs) self.opts = opts self.event_return_queue = self.opts['event_return_queue'] local_minion_opts = self.opts.copy() local_minion_opts['file_client'] = 'local' self.minion = salt.minion.MasterMinion(local_minion_opts) self.event_queue = [] self.stop = False # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. 
def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'opts': self.opts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def _handle_signals(self, signum, sigframe): # Flush and terminate if self.event_queue: self.flush_events() self.stop = True super(EventReturn, self)._handle_signals(signum, sigframe) def flush_events(self): if isinstance(self.opts['event_return'], list): # Multiple event returners for r in self.opts['event_return']: log.debug('Calling event returner %s, one of many.', r) event_return = '{0}.event_return'.format(r) self._flush_event_single(event_return) else: # Only a single event returner log.debug('Calling event returner %s, only one configured.', self.opts['event_return']) event_return = '{0}.event_return'.format( self.opts['event_return'] ) self._flush_event_single(event_return) del self.event_queue[:] def _flush_event_single(self, event_return): if event_return in self.minion.returners: try: self.minion.returners[event_return](self.event_queue) except Exception as exc: log.error('Could not store events - returner \'%s\' raised ' 'exception: %s', event_return, exc) # don't waste processing power unnecessarily on converting a # potentially huge dataset to a string if log.level <= logging.DEBUG: log.debug('Event data that caused an exception: %s', self.event_queue) else: log.error('Could not store return for event(s) - returner ' '\'%s\' not found.', event_return) def run(self): ''' Spin up the multiprocess event returner ''' salt.utils.process.appendproctitle(self.__class__.__name__) self.event = get_event('master', opts=self.opts, listen=True) events = self.event.iter_events(full=True) self.event.fire_event({}, 'salt/event_listen/start') try: for event in events: if event['tag'] == 'salt/event/exit': self.stop = True if self._filter(event): self.event_queue.append(event) if 
len(self.event_queue) >= self.event_return_queue: self.flush_events() if self.stop: break finally: # flush all we have at this moment if self.event_queue: self.flush_events() def _filter(self, event): ''' Take an event and run it through configured filters. Returns True if event should be stored, else False ''' tag = event['tag'] if self.opts['event_return_whitelist']: ret = False else: ret = True for whitelist_match in self.opts['event_return_whitelist']: if fnmatch.fnmatch(tag, whitelist_match): ret = True break for blacklist_match in self.opts['event_return_blacklist']: if fnmatch.fnmatch(tag, blacklist_match): ret = False break return ret class StateFire(object): ''' Evaluate the data from a state run and fire events on the master and minion for each returned chunk that is not "green" This object is made to only run on a minion ''' def __init__(self, opts, auth=None): self.opts = opts if not auth: self.auth = salt.crypt.SAuth(self.opts) else: self.auth = auth def fire_master(self, data, tag, preload=None): ''' Fire an event off on the master server CLI Example: .. code-block:: bash salt '*' event.fire_master 'stuff to be in the event' 'tag' ''' load = {} if preload: load.update(preload) load.update({ 'id': self.opts['id'], 'tag': tag, 'data': data, 'cmd': '_minion_event', 'tok': self.auth.gen_token(b'salt'), }) channel = salt.transport.client.ReqChannel.factory(self.opts) try: channel.send(load) except Exception: pass finally: channel.close() return True def fire_running(self, running): ''' Pass in a state "running" dict, this is the return dict from a state call. The dict will be processed and fire events. By default yellows and reds fire events on the master and minion, but this can be configured. 
''' load = {'id': self.opts['id'], 'events': [], 'cmd': '_minion_event'} for stag in sorted( running, key=lambda k: running[k].get('__run_num__', 0)): if running[stag]['result'] and not running[stag]['changes']: continue tag = 'state_{0}_{1}'.format( six.text_type(running[stag]['result']), 'True' if running[stag]['changes'] else 'False') load['events'].append({ 'tag': tag, 'data': running[stag], }) channel = salt.transport.client.ReqChannel.factory(self.opts) try: channel.send(load) except Exception: pass finally: channel.close() return True
saltstack/salt
salt/utils/event.py
get_master_event
python
def get_master_event(opts, sock_dir, listen=True, io_loop=None, raise_errors=False, keep_loop=False): ''' Return an event object suitable for the named transport ''' # TODO: AIO core is separate from transport if opts['transport'] in ('zeromq', 'tcp', 'detect'): return MasterEvent(sock_dir, opts, listen=listen, io_loop=io_loop, raise_errors=raise_errors, keep_loop=keep_loop)
Return an event object suitable for the named transport
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L148-L154
null
# -*- coding: utf-8 -*- ''' Manage events Events are all fired off via a zeromq 'pub' socket, and listened to with local zeromq 'sub' sockets All of the formatting is self contained in the event module, so we should be able to modify the structure in the future since the same module used to read events is the same module used to fire off events. Old style event messages were comprised of two parts delimited at the 20 char point. The first 20 characters are used for the zeromq subscriber to match publications and 20 characters was chosen because it was at the time a few more characters than the length of a jid (Job ID). Any tags of length less than 20 characters were padded with "|" chars out to 20 characters. Although not explicit, the data for an event comprised a python dict that was serialized by msgpack. New style event messages support event tags longer than 20 characters while still being backwards compatible with old style tags. The longer tags better enable name spaced event tags which tend to be longer. Moreover, the constraint that the event data be a python dict is now an explicit constraint and fire-event will now raise a ValueError if not. Tags must be ascii safe strings, that is, have values less than 0x80 Since the msgpack dict (map) indicators have values greater than or equal to 0x80 it can be unambiguously determined if the start of data is at char 21 or not. In the new style, when the tag is longer than 20 characters, an end of tag string is appended to the tag given by the string constant TAGEND, that is, two line feeds '\n\n'. When the tag is less than 20 characters then the tag is padded with pipes "|" out to 20 characters as before. When the tag is exactly 20 characters no padded is done. The get_event method intelligently figures out if the tag is longer than 20 characters. The convention for namespacing is to use dot characters "." as the name space delimiter. The name space "salt" is reserved by SaltStack for internal events. 
For example: Namespaced tag 'salt.runner.manage.status.start' ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import os import time import fnmatch import hashlib import logging import datetime import sys try: from collections.abc import MutableMapping except ImportError: from collections import MutableMapping from multiprocessing.util import Finalize from salt.ext.six.moves import range # Import third party libs from salt.ext import six import tornado.ioloop import tornado.iostream # Import salt libs import salt.config import salt.payload import salt.utils.asynchronous import salt.utils.cache import salt.utils.dicttrim import salt.utils.files import salt.utils.platform import salt.utils.process import salt.utils.stringutils import salt.utils.zeromq import salt.log.setup import salt.defaults.exitcodes import salt.transport.ipc import salt.transport.client log = logging.getLogger(__name__) # The SUB_EVENT set is for functions that require events fired based on # component executions, like the state system SUB_EVENT = ('state.highstate', 'state.sls') TAGEND = str('\n\n') # long tag delimiter TAGPARTER = str('/') # name spaced tag delimiter SALT = 'salt' # base prefix for all salt/ events # dict map of namespaced base tag prefixes for salt events TAGS = { 'auth': 'auth', # prefix for all salt/auth events 'job': 'job', # prefix for all salt/job events (minion jobs) 'key': 'key', # prefix for all salt/key events 'minion': 'minion', # prefix for all salt/minion events # (minion sourced events) 'syndic': 'syndic', # prefix for all salt/syndic events # (syndic minion sourced events) 'run': 'run', # prefix for all salt/run events (salt runners) 'wheel': 'wheel', # prefix for all salt/wheel events 'cloud': 'cloud', # prefix for all salt/cloud events 'fileserver': 'fileserver', # prefix for all salt/fileserver events 'queue': 'queue', # prefix for all salt/queue events } def get_event( node, sock_dir=None, transport='zeromq', 
opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' Return an event object suitable for the named transport :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. ''' sock_dir = sock_dir or opts['sock_dir'] # TODO: AIO core is separate from transport if node == 'master': return MasterEvent(sock_dir, opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) return SaltEvent(node, sock_dir, opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) def fire_args(opts, jid, tag_data, prefix=''): ''' Fire an event containing the arguments passed to an orchestration job ''' try: tag_suffix = [jid, 'args'] except NameError: pass else: tag = tagify(tag_suffix, prefix) try: _event = get_master_event(opts, opts['sock_dir'], listen=False) _event.fire_event(tag_data, tag=tag) except Exception as exc: # Don't let a problem here hold up the rest of the orchestration log.warning( 'Failed to fire args event %s with data %s: %s', tag, tag_data, exc ) def tagify(suffix='', prefix='', base=SALT): ''' convenience function to build a namespaced event tag string from joining with the TABPART character the base, prefix and suffix If string prefix is a valid key in TAGS Then use the value of key prefix Else use prefix string If suffix is a list Then join all string elements of suffix individually Else use string suffix ''' parts = [base, TAGS.get(prefix, prefix)] if hasattr(suffix, 'append'): # list so extend parts parts.extend(suffix) else: # string so append parts.append(suffix) for index, _ in enumerate(parts): try: parts[index] = salt.utils.stringutils.to_str(parts[index]) except TypeError: parts[index] = str(parts[index]) return TAGPARTER.join([part for part in parts if part]) def update_stats(stats, start_time, data): ''' Calculate the master stats and return the updated stat info 
''' end_time = time.time() cmd = data['cmd'] # the jid is used as the create time try: jid = data['jid'] except KeyError: try: jid = data['data']['__pub_jid'] except KeyError: log.info('jid not found in data, stats not updated') return stats create_time = int(time.mktime(time.strptime(jid, '%Y%m%d%H%M%S%f'))) latency = start_time - create_time duration = end_time - start_time stats[cmd]['runs'] += 1 stats[cmd]['latency'] = (stats[cmd]['latency'] * (stats[cmd]['runs'] - 1) + latency) / stats[cmd]['runs'] stats[cmd]['mean'] = (stats[cmd]['mean'] * (stats[cmd]['runs'] - 1) + duration) / stats[cmd]['runs'] return stats class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. 
This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], 
tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. 
wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. 
- 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. 
''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event 
arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass class MasterEvent(SaltEvent): ''' Warning! Use the get_event function or the code will not be RAET compatible Create a master event management object ''' def __init__( self, sock_dir, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): super(MasterEvent, self).__init__( 'master', sock_dir, opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) class LocalClientEvent(MasterEvent): ''' Warning! Use the get_event function or the code will not be RAET compatible This class is just used to differentiate who is handling the events, specially on logs, but it's the same as MasterEvent. ''' class NamespacedEvent(object): ''' A wrapper for sending events within a specific base namespace ''' def __init__(self, event, base, print_func=None): self.event = event self.base = base self.print_func = print_func def fire_event(self, data, tag): self.event.fire_event(data, tagify(tag, base=self.base)) if self.print_func is not None: self.print_func(tag, data) class MinionEvent(SaltEvent): ''' Warning! 
Use the get_event function or the code will not be RAET compatible Create a master event management object ''' def __init__(self, opts, listen=True, io_loop=None, keep_loop=False, raise_errors=False): super(MinionEvent, self).__init__( 'minion', sock_dir=opts.get('sock_dir'), opts=opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) class AsyncEventPublisher(object): ''' An event publisher class intended to run in an ioloop (within a single process) TODO: remove references to "minion_event" whenever we need to use this for other things ''' def __init__(self, opts, io_loop=None): self.opts = salt.config.DEFAULT_MINION_OPTS.copy() default_minion_sock_dir = self.opts['sock_dir'] self.opts.update(opts) self.io_loop = io_loop or tornado.ioloop.IOLoop.current() self._closing = False hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] epub_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pub.ipc'.format(id_hash) ) if os.path.exists(epub_sock_path): os.unlink(epub_sock_path) epull_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pull.ipc'.format(id_hash) ) if os.path.exists(epull_sock_path): os.unlink(epull_sock_path) if self.opts['ipc_mode'] == 'tcp': epub_uri = int(self.opts['tcp_pub_port']) epull_uri = int(self.opts['tcp_pull_port']) else: epub_uri = epub_sock_path epull_uri = epull_sock_path log.debug('%s PUB socket URI: %s', self.__class__.__name__, epub_uri) log.debug('%s PULL socket URI: %s', self.__class__.__name__, epull_uri) minion_sock_dir = self.opts['sock_dir'] if not os.path.isdir(minion_sock_dir): # Let's try to create the directory defined on the configuration # file try: os.makedirs(minion_sock_dir, 0o755) except OSError as exc: log.error('Could not create SOCK_DIR: %s', exc) # Let's not fail yet and try 
using the default path if minion_sock_dir == default_minion_sock_dir: # We're already trying the default system path, stop now! raise if not os.path.isdir(default_minion_sock_dir): try: os.makedirs(default_minion_sock_dir, 0o755) except OSError as exc: log.error('Could not create SOCK_DIR: %s', exc) # Let's stop at this stage raise self.publisher = salt.transport.ipc.IPCMessagePublisher( self.opts, epub_uri, io_loop=self.io_loop ) self.puller = salt.transport.ipc.IPCMessageServer( self.opts, epull_uri, io_loop=self.io_loop, payload_handler=self.handle_publish ) log.info('Starting pull socket on %s', epull_uri) with salt.utils.files.set_umask(0o177): self.publisher.start() self.puller.start() def handle_publish(self, package, _): ''' Get something from epull, publish it out epub, and return the package (or None) ''' try: self.publisher.publish(package) return package # Add an extra fallback in case a forked process leeks through except Exception: log.critical('Unexpected error while polling minion events', exc_info=True) return None def close(self): if self._closing: return self._closing = True if hasattr(self, 'publisher'): self.publisher.close() if hasattr(self, 'puller'): self.puller.close() def __del__(self): self.close() class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' The interface that takes master events and republishes them out to anyone who wants to listen ''' def __init__(self, opts, **kwargs): super(EventPublisher, self).__init__(**kwargs) self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() self.opts.update(opts) self._closing = False # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. 
def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'opts': self.opts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def run(self): ''' Bind the pub and pull sockets for events ''' salt.utils.process.appendproctitle(self.__class__.__name__) self.io_loop = tornado.ioloop.IOLoop() with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.opts['ipc_mode'] == 'tcp': epub_uri = int(self.opts['tcp_master_pub_port']) epull_uri = int(self.opts['tcp_master_pull_port']) else: epub_uri = os.path.join( self.opts['sock_dir'], 'master_event_pub.ipc' ) epull_uri = os.path.join( self.opts['sock_dir'], 'master_event_pull.ipc' ) self.publisher = salt.transport.ipc.IPCMessagePublisher( self.opts, epub_uri, io_loop=self.io_loop ) self.puller = salt.transport.ipc.IPCMessageServer( self.opts, epull_uri, io_loop=self.io_loop, payload_handler=self.handle_publish, ) # Start the master event publisher with salt.utils.files.set_umask(0o177): self.publisher.start() self.puller.start() if (self.opts['ipc_mode'] != 'tcp' and ( self.opts['publisher_acl'] or self.opts['external_auth'])): os.chmod(os.path.join( self.opts['sock_dir'], 'master_event_pub.ipc'), 0o666) # Make sure the IO loop and respective sockets are closed and # destroyed Finalize(self, self.close, exitpriority=15) self.io_loop.start() def handle_publish(self, package, _): ''' Get something from epull, publish it out epub, and return the package (or None) ''' try: self.publisher.publish(package) return package # Add an extra fallback in case a forked process leeks through except Exception: log.critical('Unexpected error while polling master events', exc_info=True) return None def close(self): if self._closing: return self._closing = True if hasattr(self, 'publisher'): self.publisher.close() if hasattr(self, 'puller'): self.puller.close() if hasattr(self, 
'io_loop'): self.io_loop.close() def _handle_signals(self, signum, sigframe): self.close() super(EventPublisher, self)._handle_signals(signum, sigframe) def __del__(self): self.close() class EventReturn(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' A dedicated process which listens to the master event bus and queues and forwards events to the specified returner. ''' def __new__(cls, *args, **kwargs): if sys.platform.startswith('win'): # This is required for Windows. On Linux, when a process is # forked, the module namespace is copied and the current process # gets all of sys.modules from where the fork happens. This is not # the case for Windows. import salt.minion # pylint: disable=unused-import instance = super(EventReturn, cls).__new__(cls, *args, **kwargs) return instance def __init__(self, opts, **kwargs): ''' Initialize the EventReturn system Return an EventReturn instance ''' super(EventReturn, self).__init__(**kwargs) self.opts = opts self.event_return_queue = self.opts['event_return_queue'] local_minion_opts = self.opts.copy() local_minion_opts['file_client'] = 'local' self.minion = salt.minion.MasterMinion(local_minion_opts) self.event_queue = [] self.stop = False # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. 
def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'opts': self.opts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def _handle_signals(self, signum, sigframe): # Flush and terminate if self.event_queue: self.flush_events() self.stop = True super(EventReturn, self)._handle_signals(signum, sigframe) def flush_events(self): if isinstance(self.opts['event_return'], list): # Multiple event returners for r in self.opts['event_return']: log.debug('Calling event returner %s, one of many.', r) event_return = '{0}.event_return'.format(r) self._flush_event_single(event_return) else: # Only a single event returner log.debug('Calling event returner %s, only one configured.', self.opts['event_return']) event_return = '{0}.event_return'.format( self.opts['event_return'] ) self._flush_event_single(event_return) del self.event_queue[:] def _flush_event_single(self, event_return): if event_return in self.minion.returners: try: self.minion.returners[event_return](self.event_queue) except Exception as exc: log.error('Could not store events - returner \'%s\' raised ' 'exception: %s', event_return, exc) # don't waste processing power unnecessarily on converting a # potentially huge dataset to a string if log.level <= logging.DEBUG: log.debug('Event data that caused an exception: %s', self.event_queue) else: log.error('Could not store return for event(s) - returner ' '\'%s\' not found.', event_return) def run(self): ''' Spin up the multiprocess event returner ''' salt.utils.process.appendproctitle(self.__class__.__name__) self.event = get_event('master', opts=self.opts, listen=True) events = self.event.iter_events(full=True) self.event.fire_event({}, 'salt/event_listen/start') try: for event in events: if event['tag'] == 'salt/event/exit': self.stop = True if self._filter(event): self.event_queue.append(event) if 
len(self.event_queue) >= self.event_return_queue: self.flush_events() if self.stop: break finally: # flush all we have at this moment if self.event_queue: self.flush_events() def _filter(self, event): ''' Take an event and run it through configured filters. Returns True if event should be stored, else False ''' tag = event['tag'] if self.opts['event_return_whitelist']: ret = False else: ret = True for whitelist_match in self.opts['event_return_whitelist']: if fnmatch.fnmatch(tag, whitelist_match): ret = True break for blacklist_match in self.opts['event_return_blacklist']: if fnmatch.fnmatch(tag, blacklist_match): ret = False break return ret class StateFire(object): ''' Evaluate the data from a state run and fire events on the master and minion for each returned chunk that is not "green" This object is made to only run on a minion ''' def __init__(self, opts, auth=None): self.opts = opts if not auth: self.auth = salt.crypt.SAuth(self.opts) else: self.auth = auth def fire_master(self, data, tag, preload=None): ''' Fire an event off on the master server CLI Example: .. code-block:: bash salt '*' event.fire_master 'stuff to be in the event' 'tag' ''' load = {} if preload: load.update(preload) load.update({ 'id': self.opts['id'], 'tag': tag, 'data': data, 'cmd': '_minion_event', 'tok': self.auth.gen_token(b'salt'), }) channel = salt.transport.client.ReqChannel.factory(self.opts) try: channel.send(load) except Exception: pass finally: channel.close() return True def fire_running(self, running): ''' Pass in a state "running" dict, this is the return dict from a state call. The dict will be processed and fire events. By default yellows and reds fire events on the master and minion, but this can be configured. 
''' load = {'id': self.opts['id'], 'events': [], 'cmd': '_minion_event'} for stag in sorted( running, key=lambda k: running[k].get('__run_num__', 0)): if running[stag]['result'] and not running[stag]['changes']: continue tag = 'state_{0}_{1}'.format( six.text_type(running[stag]['result']), 'True' if running[stag]['changes'] else 'False') load['events'].append({ 'tag': tag, 'data': running[stag], }) channel = salt.transport.client.ReqChannel.factory(self.opts) try: channel.send(load) except Exception: pass finally: channel.close() return True
saltstack/salt
salt/utils/event.py
fire_args
python
def fire_args(opts, jid, tag_data, prefix=''):
    '''
    Fire an event containing the arguments passed to an orchestration job.

    Best-effort: any failure while firing the event is logged and swallowed
    so a problem here never holds up the rest of the orchestration.

    :param dict opts: master configuration; must contain ``sock_dir``
    :param jid: job ID of the orchestration job, used to namespace the tag
    :param dict tag_data: payload describing the job arguments
    :param str prefix: optional tag prefix forwarded to :func:`tagify`
    '''
    # NOTE: the original code wrapped this list literal in a
    # ``try/except NameError`` block, but ``jid`` is a parameter and a list
    # literal cannot raise NameError, so that guard was dead code and has
    # been removed.
    tag = tagify([jid, 'args'], prefix)
    try:
        _event = get_master_event(opts, opts['sock_dir'], listen=False)
        _event.fire_event(tag_data, tag=tag)
    except Exception as exc:
        # Don't let a problem here hold up the rest of the orchestration
        log.warning(
            'Failed to fire args event %s with data %s: %s',
            tag, tag_data, exc
        )
Fire an event containing the arguments passed to an orchestration job
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L157-L175
[ "def get_master_event(opts, sock_dir, listen=True, io_loop=None, raise_errors=False, keep_loop=False):\n '''\n Return an event object suitable for the named transport\n '''\n # TODO: AIO core is separate from transport\n if opts['transport'] in ('zeromq', 'tcp', 'detect'):\n return MasterEvent(sock_dir, opts, listen=listen, io_loop=io_loop, raise_errors=raise_errors, keep_loop=keep_loop)\n", "def tagify(suffix='', prefix='', base=SALT):\n '''\n convenience function to build a namespaced event tag string\n from joining with the TABPART character the base, prefix and suffix\n\n If string prefix is a valid key in TAGS Then use the value of key prefix\n Else use prefix string\n\n If suffix is a list Then join all string elements of suffix individually\n Else use string suffix\n\n '''\n parts = [base, TAGS.get(prefix, prefix)]\n if hasattr(suffix, 'append'): # list so extend parts\n parts.extend(suffix)\n else: # string so append\n parts.append(suffix)\n\n for index, _ in enumerate(parts):\n try:\n parts[index] = salt.utils.stringutils.to_str(parts[index])\n except TypeError:\n parts[index] = str(parts[index])\n return TAGPARTER.join([part for part in parts if part])\n", "def fire_event(self, data, tag, timeout=1000):\n '''\n Send a single event into the publisher with payload dict \"data\" and\n event identifier \"tag\"\n\n The default is 1000 ms\n '''\n if not six.text_type(tag): # no empty tags allowed\n raise ValueError('Empty tag.')\n\n if not isinstance(data, MutableMapping): # data must be dict\n raise ValueError(\n 'Dict object expected, not \\'{0}\\'.'.format(data)\n )\n\n if not self.cpush:\n if timeout is not None:\n timeout_s = float(timeout) / 1000\n else:\n timeout_s = None\n if not self.connect_pull(timeout=timeout_s):\n return False\n\n data['_stamp'] = datetime.datetime.utcnow().isoformat()\n\n tagend = TAGEND\n if six.PY2:\n dump_data = self.serial.dumps(data)\n else:\n # Since the pack / unpack logic here is for local events only,\n # it is safe to 
change the wire protocol. The mechanism\n # that sends events from minion to master is outside this\n # file.\n dump_data = self.serial.dumps(data, use_bin_type=True)\n\n serialized_data = salt.utils.dicttrim.trim_dict(\n dump_data,\n self.opts['max_event_size'],\n is_msgpacked=True,\n use_bin_type=six.PY3\n )\n log.debug('Sending event: tag = %s; data = %s', tag, data)\n event = b''.join([\n salt.utils.stringutils.to_bytes(tag),\n salt.utils.stringutils.to_bytes(tagend),\n serialized_data])\n msg = salt.utils.stringutils.to_bytes(event, 'utf-8')\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n try:\n self.io_loop.run_sync(lambda: self.pusher.send(msg))\n except Exception as ex:\n log.debug(ex)\n raise\n else:\n self.io_loop.spawn_callback(self.pusher.send, msg)\n return True\n" ]
# -*- coding: utf-8 -*- ''' Manage events Events are all fired off via a zeromq 'pub' socket, and listened to with local zeromq 'sub' sockets All of the formatting is self contained in the event module, so we should be able to modify the structure in the future since the same module used to read events is the same module used to fire off events. Old style event messages were comprised of two parts delimited at the 20 char point. The first 20 characters are used for the zeromq subscriber to match publications and 20 characters was chosen because it was at the time a few more characters than the length of a jid (Job ID). Any tags of length less than 20 characters were padded with "|" chars out to 20 characters. Although not explicit, the data for an event comprised a python dict that was serialized by msgpack. New style event messages support event tags longer than 20 characters while still being backwards compatible with old style tags. The longer tags better enable name spaced event tags which tend to be longer. Moreover, the constraint that the event data be a python dict is now an explicit constraint and fire-event will now raise a ValueError if not. Tags must be ascii safe strings, that is, have values less than 0x80 Since the msgpack dict (map) indicators have values greater than or equal to 0x80 it can be unambiguously determined if the start of data is at char 21 or not. In the new style, when the tag is longer than 20 characters, an end of tag string is appended to the tag given by the string constant TAGEND, that is, two line feeds '\n\n'. When the tag is less than 20 characters then the tag is padded with pipes "|" out to 20 characters as before. When the tag is exactly 20 characters no padded is done. The get_event method intelligently figures out if the tag is longer than 20 characters. The convention for namespacing is to use dot characters "." as the name space delimiter. The name space "salt" is reserved by SaltStack for internal events. 
For example: Namespaced tag 'salt.runner.manage.status.start' ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import os import time import fnmatch import hashlib import logging import datetime import sys try: from collections.abc import MutableMapping except ImportError: from collections import MutableMapping from multiprocessing.util import Finalize from salt.ext.six.moves import range # Import third party libs from salt.ext import six import tornado.ioloop import tornado.iostream # Import salt libs import salt.config import salt.payload import salt.utils.asynchronous import salt.utils.cache import salt.utils.dicttrim import salt.utils.files import salt.utils.platform import salt.utils.process import salt.utils.stringutils import salt.utils.zeromq import salt.log.setup import salt.defaults.exitcodes import salt.transport.ipc import salt.transport.client log = logging.getLogger(__name__) # The SUB_EVENT set is for functions that require events fired based on # component executions, like the state system SUB_EVENT = ('state.highstate', 'state.sls') TAGEND = str('\n\n') # long tag delimiter TAGPARTER = str('/') # name spaced tag delimiter SALT = 'salt' # base prefix for all salt/ events # dict map of namespaced base tag prefixes for salt events TAGS = { 'auth': 'auth', # prefix for all salt/auth events 'job': 'job', # prefix for all salt/job events (minion jobs) 'key': 'key', # prefix for all salt/key events 'minion': 'minion', # prefix for all salt/minion events # (minion sourced events) 'syndic': 'syndic', # prefix for all salt/syndic events # (syndic minion sourced events) 'run': 'run', # prefix for all salt/run events (salt runners) 'wheel': 'wheel', # prefix for all salt/wheel events 'cloud': 'cloud', # prefix for all salt/cloud events 'fileserver': 'fileserver', # prefix for all salt/fileserver events 'queue': 'queue', # prefix for all salt/queue events } def get_event( node, sock_dir=None, transport='zeromq', 
opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' Return an event object suitable for the named transport :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. ''' sock_dir = sock_dir or opts['sock_dir'] # TODO: AIO core is separate from transport if node == 'master': return MasterEvent(sock_dir, opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) return SaltEvent(node, sock_dir, opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) def get_master_event(opts, sock_dir, listen=True, io_loop=None, raise_errors=False, keep_loop=False): ''' Return an event object suitable for the named transport ''' # TODO: AIO core is separate from transport if opts['transport'] in ('zeromq', 'tcp', 'detect'): return MasterEvent(sock_dir, opts, listen=listen, io_loop=io_loop, raise_errors=raise_errors, keep_loop=keep_loop) def tagify(suffix='', prefix='', base=SALT): ''' convenience function to build a namespaced event tag string from joining with the TABPART character the base, prefix and suffix If string prefix is a valid key in TAGS Then use the value of key prefix Else use prefix string If suffix is a list Then join all string elements of suffix individually Else use string suffix ''' parts = [base, TAGS.get(prefix, prefix)] if hasattr(suffix, 'append'): # list so extend parts parts.extend(suffix) else: # string so append parts.append(suffix) for index, _ in enumerate(parts): try: parts[index] = salt.utils.stringutils.to_str(parts[index]) except TypeError: parts[index] = str(parts[index]) return TAGPARTER.join([part for part in parts if part]) def update_stats(stats, start_time, data): ''' Calculate the master stats and return the updated stat info ''' end_time = time.time() cmd = data['cmd'] # the jid is used as the create time try: jid = data['jid'] except 
KeyError: try: jid = data['data']['__pub_jid'] except KeyError: log.info('jid not found in data, stats not updated') return stats create_time = int(time.mktime(time.strptime(jid, '%Y%m%d%H%M%S%f'))) latency = start_time - create_time duration = end_time - start_time stats[cmd]['runs'] += 1 stats[cmd]['latency'] = (stats[cmd]['latency'] * (stats[cmd]['runs'] - 1) + latency) / stats[cmd]['runs'] stats[cmd]['mean'] = (stats[cmd]['mean'] * (stats[cmd]['runs'] - 1) + duration) / stats[cmd]['runs'] return stats class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. 
This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], 
tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. 
wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. 
- 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. 
''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event 
arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass class MasterEvent(SaltEvent): ''' Warning! Use the get_event function or the code will not be RAET compatible Create a master event management object ''' def __init__( self, sock_dir, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): super(MasterEvent, self).__init__( 'master', sock_dir, opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) class LocalClientEvent(MasterEvent): ''' Warning! Use the get_event function or the code will not be RAET compatible This class is just used to differentiate who is handling the events, specially on logs, but it's the same as MasterEvent. ''' class NamespacedEvent(object): ''' A wrapper for sending events within a specific base namespace ''' def __init__(self, event, base, print_func=None): self.event = event self.base = base self.print_func = print_func def fire_event(self, data, tag): self.event.fire_event(data, tagify(tag, base=self.base)) if self.print_func is not None: self.print_func(tag, data) class MinionEvent(SaltEvent): ''' Warning! 
Use the get_event function or the code will not be RAET compatible Create a master event management object ''' def __init__(self, opts, listen=True, io_loop=None, keep_loop=False, raise_errors=False): super(MinionEvent, self).__init__( 'minion', sock_dir=opts.get('sock_dir'), opts=opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) class AsyncEventPublisher(object): ''' An event publisher class intended to run in an ioloop (within a single process) TODO: remove references to "minion_event" whenever we need to use this for other things ''' def __init__(self, opts, io_loop=None): self.opts = salt.config.DEFAULT_MINION_OPTS.copy() default_minion_sock_dir = self.opts['sock_dir'] self.opts.update(opts) self.io_loop = io_loop or tornado.ioloop.IOLoop.current() self._closing = False hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] epub_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pub.ipc'.format(id_hash) ) if os.path.exists(epub_sock_path): os.unlink(epub_sock_path) epull_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pull.ipc'.format(id_hash) ) if os.path.exists(epull_sock_path): os.unlink(epull_sock_path) if self.opts['ipc_mode'] == 'tcp': epub_uri = int(self.opts['tcp_pub_port']) epull_uri = int(self.opts['tcp_pull_port']) else: epub_uri = epub_sock_path epull_uri = epull_sock_path log.debug('%s PUB socket URI: %s', self.__class__.__name__, epub_uri) log.debug('%s PULL socket URI: %s', self.__class__.__name__, epull_uri) minion_sock_dir = self.opts['sock_dir'] if not os.path.isdir(minion_sock_dir): # Let's try to create the directory defined on the configuration # file try: os.makedirs(minion_sock_dir, 0o755) except OSError as exc: log.error('Could not create SOCK_DIR: %s', exc) # Let's not fail yet and try 
using the default path if minion_sock_dir == default_minion_sock_dir: # We're already trying the default system path, stop now! raise if not os.path.isdir(default_minion_sock_dir): try: os.makedirs(default_minion_sock_dir, 0o755) except OSError as exc: log.error('Could not create SOCK_DIR: %s', exc) # Let's stop at this stage raise self.publisher = salt.transport.ipc.IPCMessagePublisher( self.opts, epub_uri, io_loop=self.io_loop ) self.puller = salt.transport.ipc.IPCMessageServer( self.opts, epull_uri, io_loop=self.io_loop, payload_handler=self.handle_publish ) log.info('Starting pull socket on %s', epull_uri) with salt.utils.files.set_umask(0o177): self.publisher.start() self.puller.start() def handle_publish(self, package, _): ''' Get something from epull, publish it out epub, and return the package (or None) ''' try: self.publisher.publish(package) return package # Add an extra fallback in case a forked process leeks through except Exception: log.critical('Unexpected error while polling minion events', exc_info=True) return None def close(self): if self._closing: return self._closing = True if hasattr(self, 'publisher'): self.publisher.close() if hasattr(self, 'puller'): self.puller.close() def __del__(self): self.close() class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' The interface that takes master events and republishes them out to anyone who wants to listen ''' def __init__(self, opts, **kwargs): super(EventPublisher, self).__init__(**kwargs) self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() self.opts.update(opts) self._closing = False # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. 
def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'opts': self.opts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def run(self): ''' Bind the pub and pull sockets for events ''' salt.utils.process.appendproctitle(self.__class__.__name__) self.io_loop = tornado.ioloop.IOLoop() with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.opts['ipc_mode'] == 'tcp': epub_uri = int(self.opts['tcp_master_pub_port']) epull_uri = int(self.opts['tcp_master_pull_port']) else: epub_uri = os.path.join( self.opts['sock_dir'], 'master_event_pub.ipc' ) epull_uri = os.path.join( self.opts['sock_dir'], 'master_event_pull.ipc' ) self.publisher = salt.transport.ipc.IPCMessagePublisher( self.opts, epub_uri, io_loop=self.io_loop ) self.puller = salt.transport.ipc.IPCMessageServer( self.opts, epull_uri, io_loop=self.io_loop, payload_handler=self.handle_publish, ) # Start the master event publisher with salt.utils.files.set_umask(0o177): self.publisher.start() self.puller.start() if (self.opts['ipc_mode'] != 'tcp' and ( self.opts['publisher_acl'] or self.opts['external_auth'])): os.chmod(os.path.join( self.opts['sock_dir'], 'master_event_pub.ipc'), 0o666) # Make sure the IO loop and respective sockets are closed and # destroyed Finalize(self, self.close, exitpriority=15) self.io_loop.start() def handle_publish(self, package, _): ''' Get something from epull, publish it out epub, and return the package (or None) ''' try: self.publisher.publish(package) return package # Add an extra fallback in case a forked process leeks through except Exception: log.critical('Unexpected error while polling master events', exc_info=True) return None def close(self): if self._closing: return self._closing = True if hasattr(self, 'publisher'): self.publisher.close() if hasattr(self, 'puller'): self.puller.close() if hasattr(self, 
'io_loop'): self.io_loop.close() def _handle_signals(self, signum, sigframe): self.close() super(EventPublisher, self)._handle_signals(signum, sigframe) def __del__(self): self.close() class EventReturn(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' A dedicated process which listens to the master event bus and queues and forwards events to the specified returner. ''' def __new__(cls, *args, **kwargs): if sys.platform.startswith('win'): # This is required for Windows. On Linux, when a process is # forked, the module namespace is copied and the current process # gets all of sys.modules from where the fork happens. This is not # the case for Windows. import salt.minion # pylint: disable=unused-import instance = super(EventReturn, cls).__new__(cls, *args, **kwargs) return instance def __init__(self, opts, **kwargs): ''' Initialize the EventReturn system Return an EventReturn instance ''' super(EventReturn, self).__init__(**kwargs) self.opts = opts self.event_return_queue = self.opts['event_return_queue'] local_minion_opts = self.opts.copy() local_minion_opts['file_client'] = 'local' self.minion = salt.minion.MasterMinion(local_minion_opts) self.event_queue = [] self.stop = False # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. 
def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'opts': self.opts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def _handle_signals(self, signum, sigframe): # Flush and terminate if self.event_queue: self.flush_events() self.stop = True super(EventReturn, self)._handle_signals(signum, sigframe) def flush_events(self): if isinstance(self.opts['event_return'], list): # Multiple event returners for r in self.opts['event_return']: log.debug('Calling event returner %s, one of many.', r) event_return = '{0}.event_return'.format(r) self._flush_event_single(event_return) else: # Only a single event returner log.debug('Calling event returner %s, only one configured.', self.opts['event_return']) event_return = '{0}.event_return'.format( self.opts['event_return'] ) self._flush_event_single(event_return) del self.event_queue[:] def _flush_event_single(self, event_return): if event_return in self.minion.returners: try: self.minion.returners[event_return](self.event_queue) except Exception as exc: log.error('Could not store events - returner \'%s\' raised ' 'exception: %s', event_return, exc) # don't waste processing power unnecessarily on converting a # potentially huge dataset to a string if log.level <= logging.DEBUG: log.debug('Event data that caused an exception: %s', self.event_queue) else: log.error('Could not store return for event(s) - returner ' '\'%s\' not found.', event_return) def run(self): ''' Spin up the multiprocess event returner ''' salt.utils.process.appendproctitle(self.__class__.__name__) self.event = get_event('master', opts=self.opts, listen=True) events = self.event.iter_events(full=True) self.event.fire_event({}, 'salt/event_listen/start') try: for event in events: if event['tag'] == 'salt/event/exit': self.stop = True if self._filter(event): self.event_queue.append(event) if 
len(self.event_queue) >= self.event_return_queue: self.flush_events() if self.stop: break finally: # flush all we have at this moment if self.event_queue: self.flush_events() def _filter(self, event): ''' Take an event and run it through configured filters. Returns True if event should be stored, else False ''' tag = event['tag'] if self.opts['event_return_whitelist']: ret = False else: ret = True for whitelist_match in self.opts['event_return_whitelist']: if fnmatch.fnmatch(tag, whitelist_match): ret = True break for blacklist_match in self.opts['event_return_blacklist']: if fnmatch.fnmatch(tag, blacklist_match): ret = False break return ret class StateFire(object): ''' Evaluate the data from a state run and fire events on the master and minion for each returned chunk that is not "green" This object is made to only run on a minion ''' def __init__(self, opts, auth=None): self.opts = opts if not auth: self.auth = salt.crypt.SAuth(self.opts) else: self.auth = auth def fire_master(self, data, tag, preload=None): ''' Fire an event off on the master server CLI Example: .. code-block:: bash salt '*' event.fire_master 'stuff to be in the event' 'tag' ''' load = {} if preload: load.update(preload) load.update({ 'id': self.opts['id'], 'tag': tag, 'data': data, 'cmd': '_minion_event', 'tok': self.auth.gen_token(b'salt'), }) channel = salt.transport.client.ReqChannel.factory(self.opts) try: channel.send(load) except Exception: pass finally: channel.close() return True def fire_running(self, running): ''' Pass in a state "running" dict, this is the return dict from a state call. The dict will be processed and fire events. By default yellows and reds fire events on the master and minion, but this can be configured. 
''' load = {'id': self.opts['id'], 'events': [], 'cmd': '_minion_event'} for stag in sorted( running, key=lambda k: running[k].get('__run_num__', 0)): if running[stag]['result'] and not running[stag]['changes']: continue tag = 'state_{0}_{1}'.format( six.text_type(running[stag]['result']), 'True' if running[stag]['changes'] else 'False') load['events'].append({ 'tag': tag, 'data': running[stag], }) channel = salt.transport.client.ReqChannel.factory(self.opts) try: channel.send(load) except Exception: pass finally: channel.close() return True
saltstack/salt
salt/utils/event.py
tagify
python
def tagify(suffix='', prefix='', base=SALT):
    '''
    Build a namespaced event tag string by joining *base*, *prefix* and
    *suffix* with the tag-part delimiter character.

    If *prefix* is a known key in the TAGS map, its mapped value is used in
    place of the raw prefix string. A list-like *suffix* (anything with an
    ``append`` method) contributes each of its elements as a separate tag
    part; any other value is appended as a single part. Empty parts are
    dropped from the joined result.
    '''
    segments = [base, TAGS.get(prefix, prefix)]
    if hasattr(suffix, 'append'):
        # list-like: every element becomes its own tag part
        segments.extend(suffix)
    else:
        # plain string (or other scalar): one trailing tag part
        segments.append(suffix)

    normalized = []
    for segment in segments:
        try:
            normalized.append(salt.utils.stringutils.to_str(segment))
        except TypeError:
            # non-string-convertible values (e.g. ints) fall back to str()
            normalized.append(str(segment))
    return TAGPARTER.join(part for part in normalized if part)
convenience function to build a namespaced event tag string from joining with the TAGPART character the base, prefix and suffix If string prefix is a valid key in TAGS Then use the value of key prefix Else use prefix string If suffix is a list Then join all string elements of suffix individually Else use string suffix
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L178-L201
[ "def to_str(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str, bytes, bytearray, or unicode (py2), return str\n '''\n def _normalize(s):\n try:\n return unicodedata.normalize('NFC', s) if normalize else s\n except TypeError:\n return s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n # This shouldn't be six.string_types because if we're on PY2 and we already\n # have a string, we should just return it.\n if isinstance(s, str):\n return _normalize(s)\n\n exc = None\n if six.PY3:\n if isinstance(s, (bytes, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytes, or bytearray not {}'.format(type(s)))\n else:\n if isinstance(s, bytearray):\n return str(s) # future lint: disable=blacklisted-function\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n for enc in encoding:\n try:\n return _normalize(s).encode(enc, errors)\n except UnicodeEncodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytearray, or unicode')\n" ]
# -*- coding: utf-8 -*- ''' Manage events Events are all fired off via a zeromq 'pub' socket, and listened to with local zeromq 'sub' sockets All of the formatting is self contained in the event module, so we should be able to modify the structure in the future since the same module used to read events is the same module used to fire off events. Old style event messages were comprised of two parts delimited at the 20 char point. The first 20 characters are used for the zeromq subscriber to match publications and 20 characters was chosen because it was at the time a few more characters than the length of a jid (Job ID). Any tags of length less than 20 characters were padded with "|" chars out to 20 characters. Although not explicit, the data for an event comprised a python dict that was serialized by msgpack. New style event messages support event tags longer than 20 characters while still being backwards compatible with old style tags. The longer tags better enable name spaced event tags which tend to be longer. Moreover, the constraint that the event data be a python dict is now an explicit constraint and fire-event will now raise a ValueError if not. Tags must be ascii safe strings, that is, have values less than 0x80 Since the msgpack dict (map) indicators have values greater than or equal to 0x80 it can be unambiguously determined if the start of data is at char 21 or not. In the new style, when the tag is longer than 20 characters, an end of tag string is appended to the tag given by the string constant TAGEND, that is, two line feeds '\n\n'. When the tag is less than 20 characters then the tag is padded with pipes "|" out to 20 characters as before. When the tag is exactly 20 characters no padded is done. The get_event method intelligently figures out if the tag is longer than 20 characters. The convention for namespacing is to use dot characters "." as the name space delimiter. The name space "salt" is reserved by SaltStack for internal events. 
For example: Namespaced tag 'salt.runner.manage.status.start' ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import os import time import fnmatch import hashlib import logging import datetime import sys try: from collections.abc import MutableMapping except ImportError: from collections import MutableMapping from multiprocessing.util import Finalize from salt.ext.six.moves import range # Import third party libs from salt.ext import six import tornado.ioloop import tornado.iostream # Import salt libs import salt.config import salt.payload import salt.utils.asynchronous import salt.utils.cache import salt.utils.dicttrim import salt.utils.files import salt.utils.platform import salt.utils.process import salt.utils.stringutils import salt.utils.zeromq import salt.log.setup import salt.defaults.exitcodes import salt.transport.ipc import salt.transport.client log = logging.getLogger(__name__) # The SUB_EVENT set is for functions that require events fired based on # component executions, like the state system SUB_EVENT = ('state.highstate', 'state.sls') TAGEND = str('\n\n') # long tag delimiter TAGPARTER = str('/') # name spaced tag delimiter SALT = 'salt' # base prefix for all salt/ events # dict map of namespaced base tag prefixes for salt events TAGS = { 'auth': 'auth', # prefix for all salt/auth events 'job': 'job', # prefix for all salt/job events (minion jobs) 'key': 'key', # prefix for all salt/key events 'minion': 'minion', # prefix for all salt/minion events # (minion sourced events) 'syndic': 'syndic', # prefix for all salt/syndic events # (syndic minion sourced events) 'run': 'run', # prefix for all salt/run events (salt runners) 'wheel': 'wheel', # prefix for all salt/wheel events 'cloud': 'cloud', # prefix for all salt/cloud events 'fileserver': 'fileserver', # prefix for all salt/fileserver events 'queue': 'queue', # prefix for all salt/queue events } def get_event( node, sock_dir=None, transport='zeromq', 
opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' Return an event object suitable for the named transport :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. ''' sock_dir = sock_dir or opts['sock_dir'] # TODO: AIO core is separate from transport if node == 'master': return MasterEvent(sock_dir, opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) return SaltEvent(node, sock_dir, opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) def get_master_event(opts, sock_dir, listen=True, io_loop=None, raise_errors=False, keep_loop=False): ''' Return an event object suitable for the named transport ''' # TODO: AIO core is separate from transport if opts['transport'] in ('zeromq', 'tcp', 'detect'): return MasterEvent(sock_dir, opts, listen=listen, io_loop=io_loop, raise_errors=raise_errors, keep_loop=keep_loop) def fire_args(opts, jid, tag_data, prefix=''): ''' Fire an event containing the arguments passed to an orchestration job ''' try: tag_suffix = [jid, 'args'] except NameError: pass else: tag = tagify(tag_suffix, prefix) try: _event = get_master_event(opts, opts['sock_dir'], listen=False) _event.fire_event(tag_data, tag=tag) except Exception as exc: # Don't let a problem here hold up the rest of the orchestration log.warning( 'Failed to fire args event %s with data %s: %s', tag, tag_data, exc ) def update_stats(stats, start_time, data): ''' Calculate the master stats and return the updated stat info ''' end_time = time.time() cmd = data['cmd'] # the jid is used as the create time try: jid = data['jid'] except KeyError: try: jid = data['data']['__pub_jid'] except KeyError: log.info('jid not found in data, stats not updated') return stats create_time = int(time.mktime(time.strptime(jid, '%Y%m%d%H%M%S%f'))) latency = start_time - create_time duration = 
end_time - start_time stats[cmd]['runs'] += 1 stats[cmd]['latency'] = (stats[cmd]['latency'] * (stats[cmd]['runs'] - 1) + latency) / stats[cmd]['runs'] stats[cmd]['mean'] = (stats[cmd]['mean'] * (stats[cmd]['runs'] - 1) + duration) / stats[cmd]['runs'] return stats class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. 
If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. 
In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], 
tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. 
wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. 
- 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. 
''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event 
arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass class MasterEvent(SaltEvent): ''' Warning! Use the get_event function or the code will not be RAET compatible Create a master event management object ''' def __init__( self, sock_dir, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): super(MasterEvent, self).__init__( 'master', sock_dir, opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) class LocalClientEvent(MasterEvent): ''' Warning! Use the get_event function or the code will not be RAET compatible This class is just used to differentiate who is handling the events, specially on logs, but it's the same as MasterEvent. ''' class NamespacedEvent(object): ''' A wrapper for sending events within a specific base namespace ''' def __init__(self, event, base, print_func=None): self.event = event self.base = base self.print_func = print_func def fire_event(self, data, tag): self.event.fire_event(data, tagify(tag, base=self.base)) if self.print_func is not None: self.print_func(tag, data) class MinionEvent(SaltEvent): ''' Warning! 
Use the get_event function or the code will not be RAET compatible Create a master event management object ''' def __init__(self, opts, listen=True, io_loop=None, keep_loop=False, raise_errors=False): super(MinionEvent, self).__init__( 'minion', sock_dir=opts.get('sock_dir'), opts=opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) class AsyncEventPublisher(object): ''' An event publisher class intended to run in an ioloop (within a single process) TODO: remove references to "minion_event" whenever we need to use this for other things ''' def __init__(self, opts, io_loop=None): self.opts = salt.config.DEFAULT_MINION_OPTS.copy() default_minion_sock_dir = self.opts['sock_dir'] self.opts.update(opts) self.io_loop = io_loop or tornado.ioloop.IOLoop.current() self._closing = False hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] epub_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pub.ipc'.format(id_hash) ) if os.path.exists(epub_sock_path): os.unlink(epub_sock_path) epull_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pull.ipc'.format(id_hash) ) if os.path.exists(epull_sock_path): os.unlink(epull_sock_path) if self.opts['ipc_mode'] == 'tcp': epub_uri = int(self.opts['tcp_pub_port']) epull_uri = int(self.opts['tcp_pull_port']) else: epub_uri = epub_sock_path epull_uri = epull_sock_path log.debug('%s PUB socket URI: %s', self.__class__.__name__, epub_uri) log.debug('%s PULL socket URI: %s', self.__class__.__name__, epull_uri) minion_sock_dir = self.opts['sock_dir'] if not os.path.isdir(minion_sock_dir): # Let's try to create the directory defined on the configuration # file try: os.makedirs(minion_sock_dir, 0o755) except OSError as exc: log.error('Could not create SOCK_DIR: %s', exc) # Let's not fail yet and try 
using the default path if minion_sock_dir == default_minion_sock_dir: # We're already trying the default system path, stop now! raise if not os.path.isdir(default_minion_sock_dir): try: os.makedirs(default_minion_sock_dir, 0o755) except OSError as exc: log.error('Could not create SOCK_DIR: %s', exc) # Let's stop at this stage raise self.publisher = salt.transport.ipc.IPCMessagePublisher( self.opts, epub_uri, io_loop=self.io_loop ) self.puller = salt.transport.ipc.IPCMessageServer( self.opts, epull_uri, io_loop=self.io_loop, payload_handler=self.handle_publish ) log.info('Starting pull socket on %s', epull_uri) with salt.utils.files.set_umask(0o177): self.publisher.start() self.puller.start() def handle_publish(self, package, _): ''' Get something from epull, publish it out epub, and return the package (or None) ''' try: self.publisher.publish(package) return package # Add an extra fallback in case a forked process leeks through except Exception: log.critical('Unexpected error while polling minion events', exc_info=True) return None def close(self): if self._closing: return self._closing = True if hasattr(self, 'publisher'): self.publisher.close() if hasattr(self, 'puller'): self.puller.close() def __del__(self): self.close() class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' The interface that takes master events and republishes them out to anyone who wants to listen ''' def __init__(self, opts, **kwargs): super(EventPublisher, self).__init__(**kwargs) self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() self.opts.update(opts) self._closing = False # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. 
def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'opts': self.opts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def run(self): ''' Bind the pub and pull sockets for events ''' salt.utils.process.appendproctitle(self.__class__.__name__) self.io_loop = tornado.ioloop.IOLoop() with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.opts['ipc_mode'] == 'tcp': epub_uri = int(self.opts['tcp_master_pub_port']) epull_uri = int(self.opts['tcp_master_pull_port']) else: epub_uri = os.path.join( self.opts['sock_dir'], 'master_event_pub.ipc' ) epull_uri = os.path.join( self.opts['sock_dir'], 'master_event_pull.ipc' ) self.publisher = salt.transport.ipc.IPCMessagePublisher( self.opts, epub_uri, io_loop=self.io_loop ) self.puller = salt.transport.ipc.IPCMessageServer( self.opts, epull_uri, io_loop=self.io_loop, payload_handler=self.handle_publish, ) # Start the master event publisher with salt.utils.files.set_umask(0o177): self.publisher.start() self.puller.start() if (self.opts['ipc_mode'] != 'tcp' and ( self.opts['publisher_acl'] or self.opts['external_auth'])): os.chmod(os.path.join( self.opts['sock_dir'], 'master_event_pub.ipc'), 0o666) # Make sure the IO loop and respective sockets are closed and # destroyed Finalize(self, self.close, exitpriority=15) self.io_loop.start() def handle_publish(self, package, _): ''' Get something from epull, publish it out epub, and return the package (or None) ''' try: self.publisher.publish(package) return package # Add an extra fallback in case a forked process leeks through except Exception: log.critical('Unexpected error while polling master events', exc_info=True) return None def close(self): if self._closing: return self._closing = True if hasattr(self, 'publisher'): self.publisher.close() if hasattr(self, 'puller'): self.puller.close() if hasattr(self, 
'io_loop'): self.io_loop.close() def _handle_signals(self, signum, sigframe): self.close() super(EventPublisher, self)._handle_signals(signum, sigframe) def __del__(self): self.close() class EventReturn(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' A dedicated process which listens to the master event bus and queues and forwards events to the specified returner. ''' def __new__(cls, *args, **kwargs): if sys.platform.startswith('win'): # This is required for Windows. On Linux, when a process is # forked, the module namespace is copied and the current process # gets all of sys.modules from where the fork happens. This is not # the case for Windows. import salt.minion # pylint: disable=unused-import instance = super(EventReturn, cls).__new__(cls, *args, **kwargs) return instance def __init__(self, opts, **kwargs): ''' Initialize the EventReturn system Return an EventReturn instance ''' super(EventReturn, self).__init__(**kwargs) self.opts = opts self.event_return_queue = self.opts['event_return_queue'] local_minion_opts = self.opts.copy() local_minion_opts['file_client'] = 'local' self.minion = salt.minion.MasterMinion(local_minion_opts) self.event_queue = [] self.stop = False # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. 
def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'opts': self.opts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def _handle_signals(self, signum, sigframe): # Flush and terminate if self.event_queue: self.flush_events() self.stop = True super(EventReturn, self)._handle_signals(signum, sigframe) def flush_events(self): if isinstance(self.opts['event_return'], list): # Multiple event returners for r in self.opts['event_return']: log.debug('Calling event returner %s, one of many.', r) event_return = '{0}.event_return'.format(r) self._flush_event_single(event_return) else: # Only a single event returner log.debug('Calling event returner %s, only one configured.', self.opts['event_return']) event_return = '{0}.event_return'.format( self.opts['event_return'] ) self._flush_event_single(event_return) del self.event_queue[:] def _flush_event_single(self, event_return): if event_return in self.minion.returners: try: self.minion.returners[event_return](self.event_queue) except Exception as exc: log.error('Could not store events - returner \'%s\' raised ' 'exception: %s', event_return, exc) # don't waste processing power unnecessarily on converting a # potentially huge dataset to a string if log.level <= logging.DEBUG: log.debug('Event data that caused an exception: %s', self.event_queue) else: log.error('Could not store return for event(s) - returner ' '\'%s\' not found.', event_return) def run(self): ''' Spin up the multiprocess event returner ''' salt.utils.process.appendproctitle(self.__class__.__name__) self.event = get_event('master', opts=self.opts, listen=True) events = self.event.iter_events(full=True) self.event.fire_event({}, 'salt/event_listen/start') try: for event in events: if event['tag'] == 'salt/event/exit': self.stop = True if self._filter(event): self.event_queue.append(event) if 
len(self.event_queue) >= self.event_return_queue: self.flush_events() if self.stop: break finally: # flush all we have at this moment if self.event_queue: self.flush_events() def _filter(self, event): ''' Take an event and run it through configured filters. Returns True if event should be stored, else False ''' tag = event['tag'] if self.opts['event_return_whitelist']: ret = False else: ret = True for whitelist_match in self.opts['event_return_whitelist']: if fnmatch.fnmatch(tag, whitelist_match): ret = True break for blacklist_match in self.opts['event_return_blacklist']: if fnmatch.fnmatch(tag, blacklist_match): ret = False break return ret class StateFire(object): ''' Evaluate the data from a state run and fire events on the master and minion for each returned chunk that is not "green" This object is made to only run on a minion ''' def __init__(self, opts, auth=None): self.opts = opts if not auth: self.auth = salt.crypt.SAuth(self.opts) else: self.auth = auth def fire_master(self, data, tag, preload=None): ''' Fire an event off on the master server CLI Example: .. code-block:: bash salt '*' event.fire_master 'stuff to be in the event' 'tag' ''' load = {} if preload: load.update(preload) load.update({ 'id': self.opts['id'], 'tag': tag, 'data': data, 'cmd': '_minion_event', 'tok': self.auth.gen_token(b'salt'), }) channel = salt.transport.client.ReqChannel.factory(self.opts) try: channel.send(load) except Exception: pass finally: channel.close() return True def fire_running(self, running): ''' Pass in a state "running" dict, this is the return dict from a state call. The dict will be processed and fire events. By default yellows and reds fire events on the master and minion, but this can be configured. 
''' load = {'id': self.opts['id'], 'events': [], 'cmd': '_minion_event'} for stag in sorted( running, key=lambda k: running[k].get('__run_num__', 0)): if running[stag]['result'] and not running[stag]['changes']: continue tag = 'state_{0}_{1}'.format( six.text_type(running[stag]['result']), 'True' if running[stag]['changes'] else 'False') load['events'].append({ 'tag': tag, 'data': running[stag], }) channel = salt.transport.client.ReqChannel.factory(self.opts) try: channel.send(load) except Exception: pass finally: channel.close() return True
saltstack/salt
salt/utils/event.py
update_stats
python
def update_stats(stats, start_time, data):
    '''
    Calculate the master stats and return the updated stat info

    :param dict stats: per-command stats; ``stats[cmd]`` holds the keys
        ``runs`` (int), ``latency`` (float) and ``mean`` (float)
    :param float start_time: epoch time at which the master began
        processing this request
    :param dict data: the request payload; must contain ``cmd`` and either
        ``jid`` or ``data['__pub_jid']``
    :return: the (mutated) ``stats`` dict
    '''
    end_time = time.time()
    cmd = data['cmd']
    # the jid is used as the create time
    try:
        jid = data['jid']
    except KeyError:
        try:
            jid = data['data']['__pub_jid']
        except KeyError:
            log.info('jid not found in data, stats not updated')
            return stats
    try:
        create_time = int(time.mktime(time.strptime(jid, '%Y%m%d%H%M%S%f')))
    except (ValueError, OverflowError):
        # Some requests carry a jid that is not a timestamp (e.g. 'req');
        # treat stats collection as best-effort instead of crashing, the
        # same way a missing jid is handled above.
        log.info('jid is not a timestamp, stats not updated')
        return stats
    latency = start_time - create_time
    duration = end_time - start_time
    stats[cmd]['runs'] += 1
    runs = stats[cmd]['runs']
    # Fold the new sample into the running averages incrementally
    stats[cmd]['latency'] = (stats[cmd]['latency'] * (runs - 1) + latency) / runs
    stats[cmd]['mean'] = (stats[cmd]['mean'] * (runs - 1) + duration) / runs
    return stats
Calculate the master stats and return the updated stat info
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L204-L227
null
# -*- coding: utf-8 -*- ''' Manage events Events are all fired off via a zeromq 'pub' socket, and listened to with local zeromq 'sub' sockets All of the formatting is self contained in the event module, so we should be able to modify the structure in the future since the same module used to read events is the same module used to fire off events. Old style event messages were comprised of two parts delimited at the 20 char point. The first 20 characters are used for the zeromq subscriber to match publications and 20 characters was chosen because it was at the time a few more characters than the length of a jid (Job ID). Any tags of length less than 20 characters were padded with "|" chars out to 20 characters. Although not explicit, the data for an event comprised a python dict that was serialized by msgpack. New style event messages support event tags longer than 20 characters while still being backwards compatible with old style tags. The longer tags better enable name spaced event tags which tend to be longer. Moreover, the constraint that the event data be a python dict is now an explicit constraint and fire-event will now raise a ValueError if not. Tags must be ascii safe strings, that is, have values less than 0x80 Since the msgpack dict (map) indicators have values greater than or equal to 0x80 it can be unambiguously determined if the start of data is at char 21 or not. In the new style, when the tag is longer than 20 characters, an end of tag string is appended to the tag given by the string constant TAGEND, that is, two line feeds '\n\n'. When the tag is less than 20 characters then the tag is padded with pipes "|" out to 20 characters as before. When the tag is exactly 20 characters no padded is done. The get_event method intelligently figures out if the tag is longer than 20 characters. The convention for namespacing is to use dot characters "." as the name space delimiter. The name space "salt" is reserved by SaltStack for internal events. 
For example: Namespaced tag 'salt.runner.manage.status.start' ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import os import time import fnmatch import hashlib import logging import datetime import sys try: from collections.abc import MutableMapping except ImportError: from collections import MutableMapping from multiprocessing.util import Finalize from salt.ext.six.moves import range # Import third party libs from salt.ext import six import tornado.ioloop import tornado.iostream # Import salt libs import salt.config import salt.payload import salt.utils.asynchronous import salt.utils.cache import salt.utils.dicttrim import salt.utils.files import salt.utils.platform import salt.utils.process import salt.utils.stringutils import salt.utils.zeromq import salt.log.setup import salt.defaults.exitcodes import salt.transport.ipc import salt.transport.client log = logging.getLogger(__name__) # The SUB_EVENT set is for functions that require events fired based on # component executions, like the state system SUB_EVENT = ('state.highstate', 'state.sls') TAGEND = str('\n\n') # long tag delimiter TAGPARTER = str('/') # name spaced tag delimiter SALT = 'salt' # base prefix for all salt/ events # dict map of namespaced base tag prefixes for salt events TAGS = { 'auth': 'auth', # prefix for all salt/auth events 'job': 'job', # prefix for all salt/job events (minion jobs) 'key': 'key', # prefix for all salt/key events 'minion': 'minion', # prefix for all salt/minion events # (minion sourced events) 'syndic': 'syndic', # prefix for all salt/syndic events # (syndic minion sourced events) 'run': 'run', # prefix for all salt/run events (salt runners) 'wheel': 'wheel', # prefix for all salt/wheel events 'cloud': 'cloud', # prefix for all salt/cloud events 'fileserver': 'fileserver', # prefix for all salt/fileserver events 'queue': 'queue', # prefix for all salt/queue events } def get_event( node, sock_dir=None, transport='zeromq', 
opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' Return an event object suitable for the named transport :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. ''' sock_dir = sock_dir or opts['sock_dir'] # TODO: AIO core is separate from transport if node == 'master': return MasterEvent(sock_dir, opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) return SaltEvent(node, sock_dir, opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) def get_master_event(opts, sock_dir, listen=True, io_loop=None, raise_errors=False, keep_loop=False): ''' Return an event object suitable for the named transport ''' # TODO: AIO core is separate from transport if opts['transport'] in ('zeromq', 'tcp', 'detect'): return MasterEvent(sock_dir, opts, listen=listen, io_loop=io_loop, raise_errors=raise_errors, keep_loop=keep_loop) def fire_args(opts, jid, tag_data, prefix=''): ''' Fire an event containing the arguments passed to an orchestration job ''' try: tag_suffix = [jid, 'args'] except NameError: pass else: tag = tagify(tag_suffix, prefix) try: _event = get_master_event(opts, opts['sock_dir'], listen=False) _event.fire_event(tag_data, tag=tag) except Exception as exc: # Don't let a problem here hold up the rest of the orchestration log.warning( 'Failed to fire args event %s with data %s: %s', tag, tag_data, exc ) def tagify(suffix='', prefix='', base=SALT): ''' convenience function to build a namespaced event tag string from joining with the TABPART character the base, prefix and suffix If string prefix is a valid key in TAGS Then use the value of key prefix Else use prefix string If suffix is a list Then join all string elements of suffix individually Else use string suffix ''' parts = [base, TAGS.get(prefix, prefix)] if hasattr(suffix, 'append'): # list so extend parts 
parts.extend(suffix) else: # string so append parts.append(suffix) for index, _ in enumerate(parts): try: parts[index] = salt.utils.stringutils.to_str(parts[index]) except TypeError: parts[index] = str(parts[index]) return TAGPARTER.join([part for part in parts if part]) class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. 
If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. 
In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], 
tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. 
wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. 
- 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. 
''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event 
arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass class MasterEvent(SaltEvent): ''' Warning! Use the get_event function or the code will not be RAET compatible Create a master event management object ''' def __init__( self, sock_dir, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): super(MasterEvent, self).__init__( 'master', sock_dir, opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) class LocalClientEvent(MasterEvent): ''' Warning! Use the get_event function or the code will not be RAET compatible This class is just used to differentiate who is handling the events, specially on logs, but it's the same as MasterEvent. ''' class NamespacedEvent(object): ''' A wrapper for sending events within a specific base namespace ''' def __init__(self, event, base, print_func=None): self.event = event self.base = base self.print_func = print_func def fire_event(self, data, tag): self.event.fire_event(data, tagify(tag, base=self.base)) if self.print_func is not None: self.print_func(tag, data) class MinionEvent(SaltEvent): ''' Warning! 
Use the get_event function or the code will not be RAET compatible Create a master event management object ''' def __init__(self, opts, listen=True, io_loop=None, keep_loop=False, raise_errors=False): super(MinionEvent, self).__init__( 'minion', sock_dir=opts.get('sock_dir'), opts=opts, listen=listen, io_loop=io_loop, keep_loop=keep_loop, raise_errors=raise_errors) class AsyncEventPublisher(object): ''' An event publisher class intended to run in an ioloop (within a single process) TODO: remove references to "minion_event" whenever we need to use this for other things ''' def __init__(self, opts, io_loop=None): self.opts = salt.config.DEFAULT_MINION_OPTS.copy() default_minion_sock_dir = self.opts['sock_dir'] self.opts.update(opts) self.io_loop = io_loop or tornado.ioloop.IOLoop.current() self._closing = False hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] epub_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pub.ipc'.format(id_hash) ) if os.path.exists(epub_sock_path): os.unlink(epub_sock_path) epull_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pull.ipc'.format(id_hash) ) if os.path.exists(epull_sock_path): os.unlink(epull_sock_path) if self.opts['ipc_mode'] == 'tcp': epub_uri = int(self.opts['tcp_pub_port']) epull_uri = int(self.opts['tcp_pull_port']) else: epub_uri = epub_sock_path epull_uri = epull_sock_path log.debug('%s PUB socket URI: %s', self.__class__.__name__, epub_uri) log.debug('%s PULL socket URI: %s', self.__class__.__name__, epull_uri) minion_sock_dir = self.opts['sock_dir'] if not os.path.isdir(minion_sock_dir): # Let's try to create the directory defined on the configuration # file try: os.makedirs(minion_sock_dir, 0o755) except OSError as exc: log.error('Could not create SOCK_DIR: %s', exc) # Let's not fail yet and try 
using the default path if minion_sock_dir == default_minion_sock_dir: # We're already trying the default system path, stop now! raise if not os.path.isdir(default_minion_sock_dir): try: os.makedirs(default_minion_sock_dir, 0o755) except OSError as exc: log.error('Could not create SOCK_DIR: %s', exc) # Let's stop at this stage raise self.publisher = salt.transport.ipc.IPCMessagePublisher( self.opts, epub_uri, io_loop=self.io_loop ) self.puller = salt.transport.ipc.IPCMessageServer( self.opts, epull_uri, io_loop=self.io_loop, payload_handler=self.handle_publish ) log.info('Starting pull socket on %s', epull_uri) with salt.utils.files.set_umask(0o177): self.publisher.start() self.puller.start() def handle_publish(self, package, _): ''' Get something from epull, publish it out epub, and return the package (or None) ''' try: self.publisher.publish(package) return package # Add an extra fallback in case a forked process leeks through except Exception: log.critical('Unexpected error while polling minion events', exc_info=True) return None def close(self): if self._closing: return self._closing = True if hasattr(self, 'publisher'): self.publisher.close() if hasattr(self, 'puller'): self.puller.close() def __del__(self): self.close() class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' The interface that takes master events and republishes them out to anyone who wants to listen ''' def __init__(self, opts, **kwargs): super(EventPublisher, self).__init__(**kwargs) self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() self.opts.update(opts) self._closing = False # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. 
def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'opts': self.opts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def run(self): ''' Bind the pub and pull sockets for events ''' salt.utils.process.appendproctitle(self.__class__.__name__) self.io_loop = tornado.ioloop.IOLoop() with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.opts['ipc_mode'] == 'tcp': epub_uri = int(self.opts['tcp_master_pub_port']) epull_uri = int(self.opts['tcp_master_pull_port']) else: epub_uri = os.path.join( self.opts['sock_dir'], 'master_event_pub.ipc' ) epull_uri = os.path.join( self.opts['sock_dir'], 'master_event_pull.ipc' ) self.publisher = salt.transport.ipc.IPCMessagePublisher( self.opts, epub_uri, io_loop=self.io_loop ) self.puller = salt.transport.ipc.IPCMessageServer( self.opts, epull_uri, io_loop=self.io_loop, payload_handler=self.handle_publish, ) # Start the master event publisher with salt.utils.files.set_umask(0o177): self.publisher.start() self.puller.start() if (self.opts['ipc_mode'] != 'tcp' and ( self.opts['publisher_acl'] or self.opts['external_auth'])): os.chmod(os.path.join( self.opts['sock_dir'], 'master_event_pub.ipc'), 0o666) # Make sure the IO loop and respective sockets are closed and # destroyed Finalize(self, self.close, exitpriority=15) self.io_loop.start() def handle_publish(self, package, _): ''' Get something from epull, publish it out epub, and return the package (or None) ''' try: self.publisher.publish(package) return package # Add an extra fallback in case a forked process leeks through except Exception: log.critical('Unexpected error while polling master events', exc_info=True) return None def close(self): if self._closing: return self._closing = True if hasattr(self, 'publisher'): self.publisher.close() if hasattr(self, 'puller'): self.puller.close() if hasattr(self, 
'io_loop'): self.io_loop.close() def _handle_signals(self, signum, sigframe): self.close() super(EventPublisher, self)._handle_signals(signum, sigframe) def __del__(self): self.close() class EventReturn(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' A dedicated process which listens to the master event bus and queues and forwards events to the specified returner. ''' def __new__(cls, *args, **kwargs): if sys.platform.startswith('win'): # This is required for Windows. On Linux, when a process is # forked, the module namespace is copied and the current process # gets all of sys.modules from where the fork happens. This is not # the case for Windows. import salt.minion # pylint: disable=unused-import instance = super(EventReturn, cls).__new__(cls, *args, **kwargs) return instance def __init__(self, opts, **kwargs): ''' Initialize the EventReturn system Return an EventReturn instance ''' super(EventReturn, self).__init__(**kwargs) self.opts = opts self.event_return_queue = self.opts['event_return_queue'] local_minion_opts = self.opts.copy() local_minion_opts['file_client'] = 'local' self.minion = salt.minion.MasterMinion(local_minion_opts) self.event_queue = [] self.stop = False # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. 
def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'opts': self.opts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def _handle_signals(self, signum, sigframe): # Flush and terminate if self.event_queue: self.flush_events() self.stop = True super(EventReturn, self)._handle_signals(signum, sigframe) def flush_events(self): if isinstance(self.opts['event_return'], list): # Multiple event returners for r in self.opts['event_return']: log.debug('Calling event returner %s, one of many.', r) event_return = '{0}.event_return'.format(r) self._flush_event_single(event_return) else: # Only a single event returner log.debug('Calling event returner %s, only one configured.', self.opts['event_return']) event_return = '{0}.event_return'.format( self.opts['event_return'] ) self._flush_event_single(event_return) del self.event_queue[:] def _flush_event_single(self, event_return): if event_return in self.minion.returners: try: self.minion.returners[event_return](self.event_queue) except Exception as exc: log.error('Could not store events - returner \'%s\' raised ' 'exception: %s', event_return, exc) # don't waste processing power unnecessarily on converting a # potentially huge dataset to a string if log.level <= logging.DEBUG: log.debug('Event data that caused an exception: %s', self.event_queue) else: log.error('Could not store return for event(s) - returner ' '\'%s\' not found.', event_return) def run(self): ''' Spin up the multiprocess event returner ''' salt.utils.process.appendproctitle(self.__class__.__name__) self.event = get_event('master', opts=self.opts, listen=True) events = self.event.iter_events(full=True) self.event.fire_event({}, 'salt/event_listen/start') try: for event in events: if event['tag'] == 'salt/event/exit': self.stop = True if self._filter(event): self.event_queue.append(event) if 
len(self.event_queue) >= self.event_return_queue: self.flush_events() if self.stop: break finally: # flush all we have at this moment if self.event_queue: self.flush_events() def _filter(self, event): ''' Take an event and run it through configured filters. Returns True if event should be stored, else False ''' tag = event['tag'] if self.opts['event_return_whitelist']: ret = False else: ret = True for whitelist_match in self.opts['event_return_whitelist']: if fnmatch.fnmatch(tag, whitelist_match): ret = True break for blacklist_match in self.opts['event_return_blacklist']: if fnmatch.fnmatch(tag, blacklist_match): ret = False break return ret class StateFire(object): ''' Evaluate the data from a state run and fire events on the master and minion for each returned chunk that is not "green" This object is made to only run on a minion ''' def __init__(self, opts, auth=None): self.opts = opts if not auth: self.auth = salt.crypt.SAuth(self.opts) else: self.auth = auth def fire_master(self, data, tag, preload=None): ''' Fire an event off on the master server CLI Example: .. code-block:: bash salt '*' event.fire_master 'stuff to be in the event' 'tag' ''' load = {} if preload: load.update(preload) load.update({ 'id': self.opts['id'], 'tag': tag, 'data': data, 'cmd': '_minion_event', 'tok': self.auth.gen_token(b'salt'), }) channel = salt.transport.client.ReqChannel.factory(self.opts) try: channel.send(load) except Exception: pass finally: channel.close() return True def fire_running(self, running): ''' Pass in a state "running" dict, this is the return dict from a state call. The dict will be processed and fire events. By default yellows and reds fire events on the master and minion, but this can be configured. 
''' load = {'id': self.opts['id'], 'events': [], 'cmd': '_minion_event'} for stag in sorted( running, key=lambda k: running[k].get('__run_num__', 0)): if running[stag]['result'] and not running[stag]['changes']: continue tag = 'state_{0}_{1}'.format( six.text_type(running[stag]['result']), 'True' if running[stag]['changes'] else 'False') load['events'].append({ 'tag': tag, 'data': running[stag], }) channel = salt.transport.client.ReqChannel.factory(self.opts) try: channel.send(load) except Exception: pass finally: channel.close() return True
saltstack/salt
salt/utils/event.py
SaltEvent.__load_uri
python
def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri
Return the string URI for the location of the pull and pub sockets to use for firing and listening to events
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L302-L339
null
class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], 
tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. 
wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. 
- 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. 
''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event 
arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass
saltstack/salt
salt/utils/event.py
SaltEvent.subscribe
python
def subscribe(self, tag=None, match_type=None):
    '''
    Subscribe to events matching the passed tag.

    If you do not subscribe to a tag, events will be discarded by calls to
    get_event that request a different tag. In contexts where many different
    jobs are outstanding it is important to subscribe to prevent one call
    to get_event from discarding a response required by a subsequent call
    to get_event.
    '''
    # A None tag is a no-op; everything else is recorded together with
    # the matcher that decides which event tags it covers.
    if tag is not None:
        self.pending_tags.append([tag, self._get_match_func(match_type)])
Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L341-L354
[ "def _get_match_func(self, match_type=None):\n if match_type is None:\n match_type = self.opts['event_match_type']\n return getattr(self, '_match_tag_{0}'.format(match_type), None)\n" ]
class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. 
self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. 
Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. 
if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. - 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. 
versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. ''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event 
arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass
saltstack/salt
salt/utils/event.py
SaltEvent.unsubscribe
python
def unsubscribe(self, tag, match_type=None):
    '''
    Un-subscribe to events matching the passed tag.
    '''
    if tag is None:
        return
    match_func = self._get_match_func(match_type)
    # Drop the subscription record for this exact tag/matcher pair.
    self.pending_tags.remove([tag, match_func])
    # Re-filter the cached events: keep only those that some remaining
    # subscription still matches; everything else is discarded.
    kept = []
    for cached in self.pending_events:
        for ptag, pmatch_func in self.pending_tags:
            if pmatch_func(cached['tag'], ptag):
                kept.append(cached)
                break
    self.pending_events = kept
Un-subscribe to events matching the passed tag.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L356-L370
[ "def _get_match_func(self, match_type=None):\n if match_type is None:\n match_type = self.opts['event_match_type']\n return getattr(self, '_match_tag_{0}'.format(match_type), None)\n" ]
class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. 
self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. 
Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. 
if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. - 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. 
versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. ''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event 
arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass
saltstack/salt
salt/utils/event.py
SaltEvent.connect_pub
python
def connect_pub(self, timeout=None):
    '''
    Establish the publish connection
    '''
    if self.cpub:
        # Already connected; nothing more to do.
        return True
    if self._run_io_loop_sync:
        with salt.utils.asynchronous.current_ioloop(self.io_loop):
            if self.subscriber is None:
                self.subscriber = salt.transport.ipc.IPCMessageSubscriber(
                    self.puburi,
                    io_loop=self.io_loop
                )
            try:
                self.io_loop.run_sync(
                    lambda: self.subscriber.connect(timeout=timeout))
            except Exception:
                # Connection failure leaves cpub False; callers see it
                # via the return value.
                pass
            else:
                self.cpub = True
    else:
        if self.subscriber is None:
            self.subscriber = salt.transport.ipc.IPCMessageSubscriber(
                self.puburi,
                io_loop=self.io_loop
            )
        # Asynchronous case: the actual connect is deferred until
        # set_event_handler() is invoked.
        self.cpub = True
    return self.cpub
Establish the publish connection
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L372-L402
null
class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. 
self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. 
Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. 
if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. - 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. 
versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. ''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event 
arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass
saltstack/salt
salt/utils/event.py
SaltEvent.close_pub
python
def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False
Close the publish connection (if established)
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L404-L414
[ "def close(self):\n '''\n Routines to handle any cleanup before the instance shuts down.\n Sockets and filehandles should be closed explicitly, to prevent\n leaks.\n '''\n if not self._closing:\n IPCClient.close(self)\n if self._closing:\n # This will prevent this message from showing up:\n # '[ERROR ] Future exception was never retrieved:\n # StreamClosedError'\n if self._read_sync_future is not None and self._read_sync_future.done():\n self._read_sync_future.exception()\n if self._read_stream_future is not None and self._read_stream_future.done():\n self._read_stream_future.exception()\n" ]
class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. self.cpub = True return self.cpub def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. 
self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. 
Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. 
if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. - 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. 
versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. ''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event 
arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass
saltstack/salt
salt/utils/event.py
SaltEvent.connect_pull
python
def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush
Establish a connection with the event pull socket Default timeout is 1 s
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L416-L446
null
class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. 
Uses endswith to check. Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. 
if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. - 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. 
versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. ''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event 
arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass
saltstack/salt
salt/utils/event.py
SaltEvent._check_pending
python
def _check_pending(self, tag, match_func=None): if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret
Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L467-L492
[ "def _get_match_func(self, match_type=None):\n if match_type is None:\n match_type = self.opts['event_match_type']\n return getattr(self, '_match_tag_{0}'.format(match_type), None)\n" ]
class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. 
Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. 
if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. - 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. 
versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. ''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event 
arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass
saltstack/salt
salt/utils/event.py
SaltEvent._match_tag_regex
python
def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None
Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match)
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L521-L527
null
class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], 
tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. 
if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. - 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. 
versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. ''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event 
arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass
saltstack/salt
salt/utils/event.py
SaltEvent.get_event
python
def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. - 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. 
''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data']
Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. - 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L590-L667
[ "def connect_pub(self, timeout=None):\n '''\n Establish the publish connection\n '''\n if self.cpub:\n return True\n\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n if self.subscriber is None:\n self.subscriber = salt.transport.ipc.IPCMessageSubscriber(\n self.puburi,\n io_loop=self.io_loop\n )\n try:\n self.io_loop.run_sync(\n lambda: self.subscriber.connect(timeout=timeout))\n self.cpub = True\n except Exception:\n pass\n else:\n if self.subscriber is None:\n self.subscriber = salt.transport.ipc.IPCMessageSubscriber(\n self.puburi,\n io_loop=self.io_loop\n )\n\n # For the asynchronous case, the connect will be defered to when\n # set_event_handler() is invoked.\n self.cpub = True\n return self.cpub\n", "def close_pub(self):\n '''\n Close the publish connection (if established)\n '''\n if not self.cpub:\n return\n\n self.subscriber.close()\n self.subscriber = None\n self.pending_events = []\n self.cpub = False\n", "def _get_match_func(self, match_type=None):\n if match_type is None:\n match_type = self.opts['event_match_type']\n return getattr(self, '_match_tag_{0}'.format(match_type), None)\n", "def _check_pending(self, tag, match_func=None):\n \"\"\"Check the pending_events list for events that match the tag\n\n :param tag: The tag to search for\n :type tag: str\n :param tags_regex: List of re expressions to search for also\n :type tags_regex: list[re.compile()]\n :return:\n \"\"\"\n if match_func is None:\n match_func = self._get_match_func()\n old_events = self.pending_events\n self.pending_events = []\n ret = None\n for evt in old_events:\n if match_func(evt['tag'], tag):\n if ret is None:\n ret = evt\n log.trace('get_event() returning cached event = %s', ret)\n else:\n self.pending_events.append(evt)\n elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags):\n self.pending_events.append(evt)\n else:\n log.trace('get_event() discarding cached event that no longer has any subscriptions = 
%s', evt)\n return ret\n", "def _get_event(self, wait, tag, match_func=None, no_block=False):\n if match_func is None:\n match_func = self._get_match_func()\n start = time.time()\n timeout_at = start + wait\n run_once = False\n if no_block is True:\n wait = 0\n elif wait == 0:\n # If no_block is False and wait is 0, that\n # means an infinite timeout.\n wait = None\n while (run_once is False and not wait) or time.time() <= timeout_at:\n if no_block is True:\n if run_once is True:\n break\n # Trigger that at least a single iteration has gone through\n run_once = True\n try:\n # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds.\n # IPCMessageSubscriber.read_sync() uses this type of timeout.\n if not self.cpub and not self.connect_pub(timeout=wait):\n break\n\n raw = self.subscriber.read_sync(timeout=wait)\n if raw is None:\n break\n mtag, data = self.unpack(raw, self.serial)\n ret = {'data': data, 'tag': mtag}\n except KeyboardInterrupt:\n return {'tag': 'salt/event/exit', 'data': {}}\n except tornado.iostream.StreamClosedError:\n if self.raise_errors:\n raise\n else:\n return None\n except RuntimeError:\n return None\n\n if not match_func(ret['tag'], tag):\n # tag not match\n if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags):\n log.trace('get_event() caching unwanted event = %s', ret)\n self.pending_events.append(ret)\n if wait: # only update the wait timeout if we had one\n wait = timeout_at - time.time()\n continue\n\n log.trace('get_event() received = %s', ret)\n return ret\n log.trace('_get_event() waited %s seconds and received nothing', wait)\n return None\n" ]
class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], 
tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. 
wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event 
arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass
saltstack/salt
salt/utils/event.py
SaltEvent.get_event_noblock
python
def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag}
Get the raw event without blocking or any other niceties
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L669-L682
[ "def connect_pub(self, timeout=None):\n '''\n Establish the publish connection\n '''\n if self.cpub:\n return True\n\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n if self.subscriber is None:\n self.subscriber = salt.transport.ipc.IPCMessageSubscriber(\n self.puburi,\n io_loop=self.io_loop\n )\n try:\n self.io_loop.run_sync(\n lambda: self.subscriber.connect(timeout=timeout))\n self.cpub = True\n except Exception:\n pass\n else:\n if self.subscriber is None:\n self.subscriber = salt.transport.ipc.IPCMessageSubscriber(\n self.puburi,\n io_loop=self.io_loop\n )\n\n # For the asynchronous case, the connect will be defered to when\n # set_event_handler() is invoked.\n self.cpub = True\n return self.cpub\n", "def unpack(cls, raw, serial=None):\n if serial is None:\n serial = salt.payload.Serial({'serial': 'msgpack'})\n\n if six.PY2:\n mtag, sep, mdata = raw.partition(TAGEND) # split tag from data\n data = serial.loads(mdata, encoding='utf-8')\n else:\n mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data\n mtag = salt.utils.stringutils.to_str(mtag)\n data = serial.loads(mdata, encoding='utf-8')\n return mtag, data\n" ]
class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], 
tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. 
wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. 
- 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. ''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event 
arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass
saltstack/salt
salt/utils/event.py
SaltEvent.iter_events
python
def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data
Creates a generator that continuously listens for events
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L700-L709
[ "def get_event(self,\n wait=5,\n tag='',\n full=False,\n match_type=None,\n no_block=False,\n auto_reconnect=False):\n '''\n Get a single publication.\n If no publication is available, then block for up to ``wait`` seconds.\n Return publication if it is available or ``None`` if no publication is\n available.\n\n If wait is 0, then block forever.\n\n tag\n Only return events matching the given tag. If not specified, or set\n to an empty string, all events are returned. It is recommended to\n always be selective on what is to be returned in the event that\n multiple requests are being multiplexed.\n\n match_type\n Set the function to match the search tag with event tags.\n - 'startswith' : search for event tags that start with tag\n - 'endswith' : search for event tags that end with tag\n - 'find' : search for event tags that contain tag\n - 'regex' : regex search '^' + tag event tags\n - 'fnmatch' : fnmatch tag event tags matching\n Default is opts['event_match_type'] or 'startswith'\n\n .. versionadded:: 2015.8.0\n\n no_block\n Define if getting the event should be a blocking call or not.\n Defaults to False to keep backwards compatibility.\n\n .. versionadded:: 2015.8.0\n\n Notes:\n\n Searches cached publications first. 
If no cached publications are found\n that match the given tag specification, new publications are received\n and checked.\n\n If a publication is received that does not match the tag specification,\n it is DISCARDED unless it is subscribed to via subscribe() which will\n cause it to be cached.\n\n If a caller is not going to call get_event immediately after sending a\n request, it MUST subscribe the result to ensure the response is not lost\n should other regions of code call get_event for other purposes.\n '''\n assert self._run_io_loop_sync\n\n match_func = self._get_match_func(match_type)\n\n ret = self._check_pending(tag, match_func)\n if ret is None:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n if auto_reconnect:\n raise_errors = self.raise_errors\n self.raise_errors = True\n while True:\n try:\n ret = self._get_event(wait, tag, match_func, no_block)\n break\n except tornado.iostream.StreamClosedError:\n self.close_pub()\n self.connect_pub(timeout=wait)\n continue\n self.raise_errors = raise_errors\n else:\n ret = self._get_event(wait, tag, match_func, no_block)\n\n if ret is None or full:\n return ret\n else:\n return ret['data']\n" ]
class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], 
tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. 
wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. 
- 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. 
''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". 
Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion 
fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass
saltstack/salt
salt/utils/event.py
SaltEvent.fire_event
python
def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True
Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L711-L767
[ "def trim_dict(\n data,\n max_dict_bytes,\n percent=50.0,\n stepper_size=10,\n replace_with='VALUE_TRIMMED',\n is_msgpacked=False,\n use_bin_type=False):\n '''\n Takes a dictionary and iterates over its keys, looking for\n large values and replacing them with a trimmed string.\n\n If after the first pass over dictionary keys, the dictionary\n is not sufficiently small, the stepper_size will be increased\n and the dictionary will be rescanned. This allows for progressive\n scanning, removing large items first and only making additional\n passes for smaller items if necessary.\n\n This function uses msgpack to calculate the size of the dictionary\n in question. While this might seem like unnecessary overhead, a\n data structure in python must be serialized in order for sys.getsizeof()\n to accurately return the items referenced in the structure.\n\n Ex:\n >>> salt.utils.trim_dict({'a': 'b', 'c': 'x' * 10000}, 100)\n {'a': 'b', 'c': 'VALUE_TRIMMED'}\n\n To improve performance, it is adviseable to pass in msgpacked\n data structures instead of raw dictionaries. If a msgpack\n structure is passed in, it will not be unserialized unless\n necessary.\n\n If a msgpack is passed in, it will be repacked if necessary\n before being returned.\n\n :param use_bin_type: Set this to true if \"is_msgpacked=True\"\n and the msgpack data has been encoded\n with \"use_bin_type=True\". 
This also means\n that the msgpack data should be decoded with\n \"encoding='utf-8'\".\n '''\n serializer = salt.payload.Serial({'serial': 'msgpack'})\n if is_msgpacked:\n dict_size = sys.getsizeof(data)\n else:\n dict_size = sys.getsizeof(serializer.dumps(data))\n if dict_size > max_dict_bytes:\n if is_msgpacked:\n if use_bin_type:\n data = serializer.loads(data, encoding='utf-8')\n else:\n data = serializer.loads(data)\n while True:\n percent = float(percent)\n max_val_size = float(max_dict_bytes * (percent / 100))\n try:\n for key in data:\n if isinstance(data[key], dict):\n _trim_dict_in_dict(data[key],\n max_val_size,\n replace_with)\n else:\n if sys.getsizeof(data[key]) > max_val_size:\n data[key] = replace_with\n percent = percent - stepper_size\n max_val_size = float(max_dict_bytes * (percent / 100))\n if use_bin_type:\n dump_data = serializer.dumps(data, use_bin_type=True)\n else:\n dump_data = serializer.dumps(data)\n cur_dict_size = sys.getsizeof(dump_data)\n if cur_dict_size < max_dict_bytes:\n if is_msgpacked: # Repack it\n return dump_data\n else:\n return data\n elif max_val_size == 0:\n if is_msgpacked:\n return dump_data\n else:\n return data\n except ValueError:\n pass\n if is_msgpacked:\n if use_bin_type:\n return serializer.dumps(data, use_bin_type=True)\n else:\n return serializer.dumps(data)\n else:\n return data\n else:\n return data\n", "def connect_pull(self, timeout=1):\n '''\n Establish a connection with the event pull socket\n Default timeout is 1 s\n '''\n if self.cpush:\n return True\n\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n if self.pusher is None:\n self.pusher = salt.transport.ipc.IPCMessageClient(\n self.pulluri,\n io_loop=self.io_loop\n )\n try:\n self.io_loop.run_sync(\n lambda: self.pusher.connect(timeout=timeout))\n self.cpush = True\n except Exception:\n pass\n else:\n if self.pusher is None:\n self.pusher = salt.transport.ipc.IPCMessageClient(\n self.pulluri,\n 
io_loop=self.io_loop\n )\n # For the asynchronous case, the connect will be deferred to when\n # fire_event() is invoked.\n self.cpush = True\n return self.cpush\n" ]
class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], 
tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. 
wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. 
- 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. 
''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". 
Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion 
fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass
saltstack/salt
salt/utils/event.py
SaltEvent.fire_master
python
def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout)
Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L769-L782
[ "def fire_event(self, data, tag, timeout=1000):\n '''\n Send a single event into the publisher with payload dict \"data\" and\n event identifier \"tag\"\n\n The default is 1000 ms\n '''\n if not six.text_type(tag): # no empty tags allowed\n raise ValueError('Empty tag.')\n\n if not isinstance(data, MutableMapping): # data must be dict\n raise ValueError(\n 'Dict object expected, not \\'{0}\\'.'.format(data)\n )\n\n if not self.cpush:\n if timeout is not None:\n timeout_s = float(timeout) / 1000\n else:\n timeout_s = None\n if not self.connect_pull(timeout=timeout_s):\n return False\n\n data['_stamp'] = datetime.datetime.utcnow().isoformat()\n\n tagend = TAGEND\n if six.PY2:\n dump_data = self.serial.dumps(data)\n else:\n # Since the pack / unpack logic here is for local events only,\n # it is safe to change the wire protocol. The mechanism\n # that sends events from minion to master is outside this\n # file.\n dump_data = self.serial.dumps(data, use_bin_type=True)\n\n serialized_data = salt.utils.dicttrim.trim_dict(\n dump_data,\n self.opts['max_event_size'],\n is_msgpacked=True,\n use_bin_type=six.PY3\n )\n log.debug('Sending event: tag = %s; data = %s', tag, data)\n event = b''.join([\n salt.utils.stringutils.to_bytes(tag),\n salt.utils.stringutils.to_bytes(tagend),\n serialized_data])\n msg = salt.utils.stringutils.to_bytes(event, 'utf-8')\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n try:\n self.io_loop.run_sync(lambda: self.pusher.send(msg))\n except Exception as ex:\n log.debug(ex)\n raise\n else:\n self.io_loop.spawn_callback(self.pusher.send, msg)\n return True\n" ]
class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], 
tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. 
wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. 
- 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. 
''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event 
arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass
saltstack/salt
salt/utils/event.py
SaltEvent._fire_ret_load_specific_fun
python
def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass
Helper function for fire_ret_load
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L792-L844
null
class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], 
tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. 
wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. 
- 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. 
''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event 
self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass
saltstack/salt
salt/utils/event.py
SaltEvent.fire_ret_load
python
def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load)
Fire events based on information in the return load
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L846-L874
null
class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], 
tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. 
wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. 
- 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. 
''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async() def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- where globals start going missing try: self.destroy() except Exception: pass
saltstack/salt
salt/utils/event.py
SaltEvent.set_event_handler
python
def set_event_handler(self, event_handler): ''' Invoke the event_handler callback each time an event arrives. ''' assert not self._run_io_loop_sync if not self.cpub: self.connect_pub() self.subscriber.callbacks.add(event_handler) if not self.subscriber.reading: # This will handle reconnects return self.subscriber.read_async()
Invoke the event_handler callback each time an event arrives.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L880-L892
[ "def connect_pub(self, timeout=None):\n '''\n Establish the publish connection\n '''\n if self.cpub:\n return True\n\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n if self.subscriber is None:\n self.subscriber = salt.transport.ipc.IPCMessageSubscriber(\n self.puburi,\n io_loop=self.io_loop\n )\n try:\n self.io_loop.run_sync(\n lambda: self.subscriber.connect(timeout=timeout))\n self.cpub = True\n except Exception:\n pass\n else:\n if self.subscriber is None:\n self.subscriber = salt.transport.ipc.IPCMessageSubscriber(\n self.puburi,\n io_loop=self.io_loop\n )\n\n # For the asynchronous case, the connect will be defered to when\n # set_event_handler() is invoked.\n self.cpub = True\n return self.cpub\n" ]
class SaltEvent(object): ''' Warning! Use the get_event function or the code will not be RAET compatible The base class used to manage salt events ''' def __init__( self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False): ''' :param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. :param Bool keep_loop: Pass a boolean to determine if we want to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third party asynchronous code ''' self.serial = salt.payload.Serial({'serial': 'msgpack'}) self.keep_loop = keep_loop if io_loop is not None: self.io_loop = io_loop self._run_io_loop_sync = False else: self.io_loop = tornado.ioloop.IOLoop() self._run_io_loop_sync = True self.cpub = False self.cpush = False self.subscriber = None self.pusher = None self.raise_errors = raise_errors if opts is None: opts = {} if node == 'master': self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() else: self.opts = salt.config.DEFAULT_MINION_OPTS.copy() self.opts.update(opts) if sock_dir is None: sock_dir = self.opts['sock_dir'] else: self.opts['sock_dir'] = sock_dir if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] self.pending_events = [] self.__load_cache_regex() if listen and not self.cpub: # Only connect to the publisher at initialization time if # we know we want to listen. If we connect to the publisher # and don't read out events from the buffer on an on-going basis, # the buffer will grow resulting in big memory usage. self.connect_pub() @classmethod def __load_cache_regex(cls): ''' Initialize the regular expression cache and put it in the class namespace. 
The regex search strings will be prepend with '^' ''' # This is in the class namespace, to minimize cache memory # usage and maximize cache hits # The prepend='^' is to reduce differences in behavior between # the default 'startswith' and the optional 'regex' match_type cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^') def __load_uri(self, sock_dir, node): ''' Return the string URI for the location of the pull and pub sockets to use for firing and listening to events ''' if node == 'master': if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_master_pub_port']) pulluri = int(self.opts['tcp_master_pull_port']) else: puburi = os.path.join( sock_dir, 'master_event_pub.ipc' ) pulluri = os.path.join( sock_dir, 'master_event_pull.ipc' ) else: if self.opts['ipc_mode'] == 'tcp': puburi = int(self.opts['tcp_pub_port']) pulluri = int(self.opts['tcp_pull_port']) else: hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) ) pulluri = os.path.join( sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash) ) log.debug('%s PUB socket URI: %s', self.__class__.__name__, puburi) log.debug('%s PULL socket URI: %s', self.__class__.__name__, pulluri) return puburi, pulluri def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. 
''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func]) def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) def connect_pub(self, timeout=None): ''' Establish the publish connection ''' if self.cpub: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.subscriber.connect(timeout=timeout)) self.cpub = True except Exception: pass else: if self.subscriber is None: self.subscriber = salt.transport.ipc.IPCMessageSubscriber( self.puburi, io_loop=self.io_loop ) # For the asynchronous case, the connect will be defered to when # set_event_handler() is invoked. 
self.cpub = True return self.cpub def close_pub(self): ''' Close the publish connection (if established) ''' if not self.cpub: return self.subscriber.close() self.subscriber = None self.pending_events = [] self.cpub = False def connect_pull(self, timeout=1): ''' Establish a connection with the event pull socket Default timeout is 1 s ''' if self.cpush: return True if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) try: self.io_loop.run_sync( lambda: self.pusher.connect(timeout=timeout)) self.cpush = True except Exception: pass else: if self.pusher is None: self.pusher = salt.transport.ipc.IPCMessageClient( self.pulluri, io_loop=self.io_loop ) # For the asynchronous case, the connect will be deferred to when # fire_event() is invoked. self.cpush = True return self.cpush @classmethod def unpack(cls, raw, serial=None): if serial is None: serial = salt.payload.Serial({'serial': 'msgpack'}) if six.PY2: mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata, encoding='utf-8') else: mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data def _get_match_func(self, match_type=None): if match_type is None: match_type = self.opts['event_match_type'] return getattr(self, '_match_tag_{0}'.format(match_type), None) def _check_pending(self, tag, match_func=None): """Check the pending_events list for events that match the tag :param tag: The tag to search for :type tag: str :param tags_regex: List of re expressions to search for also :type tags_regex: list[re.compile()] :return: """ if match_func is None: match_func = self._get_match_func() old_events = self.pending_events self.pending_events = [] ret = None for evt in old_events: if match_func(evt['tag'], 
tag): if ret is None: ret = evt log.trace('get_event() returning cached event = %s', ret) else: self.pending_events.append(evt) elif any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt) else: log.trace('get_event() discarding cached event that no longer has any subscriptions = %s', evt) return ret @staticmethod def _match_tag_startswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match) ''' return event_tag.startswith(search_tag) @staticmethod def _match_tag_endswith(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses endswith to check. Return True (matches) or False (no match) ''' return event_tag.endswith(search_tag) @staticmethod def _match_tag_find(event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match) ''' return event_tag.find(search_tag) >= 0 def _match_tag_regex(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match) ''' return self.cache_regex.get(search_tag).search(event_tag) is not None def _match_tag_fnmatch(self, event_tag, search_tag): ''' Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match) ''' return fnmatch.fnmatch(event_tag, search_tag) def _get_event(self, wait, tag, match_func=None, no_block=False): if match_func is None: match_func = self._get_match_func() start = time.time() timeout_at = start + wait run_once = False if no_block is True: wait = 0 elif wait == 0: # If no_block is False and wait is 0, that # means an infinite timeout. 
wait = None while (run_once is False and not wait) or time.time() <= timeout_at: if no_block is True: if run_once is True: break # Trigger that at least a single iteration has gone through run_once = True try: # tornado.ioloop.IOLoop.run_sync() timeouts are in seconds. # IPCMessageSubscriber.read_sync() uses this type of timeout. if not self.cpub and not self.connect_pub(timeout=wait): break raw = self.subscriber.read_sync(timeout=wait) if raw is None: break mtag, data = self.unpack(raw, self.serial) ret = {'data': data, 'tag': mtag} except KeyboardInterrupt: return {'tag': 'salt/event/exit', 'data': {}} except tornado.iostream.StreamClosedError: if self.raise_errors: raise else: return None except RuntimeError: return None if not match_func(ret['tag'], tag): # tag not match if any(pmatch_func(ret['tag'], ptag) for ptag, pmatch_func in self.pending_tags): log.trace('get_event() caching unwanted event = %s', ret) self.pending_events.append(ret) if wait: # only update the wait timeout if we had one wait = timeout_at - time.time() continue log.trace('get_event() received = %s', ret) return ret log.trace('_get_event() waited %s seconds and received nothing', wait) return None def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False): ''' Get a single publication. If no publication is available, then block for up to ``wait`` seconds. Return publication if it is available or ``None`` if no publication is available. If wait is 0, then block forever. tag Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective on what is to be returned in the event that multiple requests are being multiplexed. match_type Set the function to match the search tag with event tags. 
- 'startswith' : search for event tags that start with tag - 'endswith' : search for event tags that end with tag - 'find' : search for event tags that contain tag - 'regex' : regex search '^' + tag event tags - 'fnmatch' : fnmatch tag event tags matching Default is opts['event_match_type'] or 'startswith' .. versionadded:: 2015.8.0 no_block Define if getting the event should be a blocking call or not. Defaults to False to keep backwards compatibility. .. versionadded:: 2015.8.0 Notes: Searches cached publications first. If no cached publications are found that match the given tag specification, new publications are received and checked. If a publication is received that does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe() which will cause it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe the result to ensure the response is not lost should other regions of code call get_event for other purposes. 
''' assert self._run_io_loop_sync match_func = self._get_match_func(match_type) ret = self._check_pending(tag, match_func) if ret is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): if auto_reconnect: raise_errors = self.raise_errors self.raise_errors = True while True: try: ret = self._get_event(wait, tag, match_func, no_block) break except tornado.iostream.StreamClosedError: self.close_pub() self.connect_pub(timeout=wait) continue self.raise_errors = raise_errors else: ret = self._get_event(wait, tag, match_func, no_block) if ret is None or full: return ret else: return ret['data'] def get_event_noblock(self): ''' Get the raw event without blocking or any other niceties ''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=0) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def get_event_block(self): ''' Get the raw event in a blocking fashion. This is slower, but it decreases the possibility of dropped events. 
''' assert self._run_io_loop_sync if not self.cpub: if not self.connect_pub(): return None raw = self.subscriber.read_sync(timeout=None) if raw is None: return None mtag, data = self.unpack(raw, self.serial) return {'data': data, 'tag': mtag} def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False): ''' Creates a generator that continuously listens for events ''' while True: data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect) if data is None: continue yield data def fire_event(self, data, tag, timeout=1000): ''' Send a single event into the publisher with payload dict "data" and event identifier "tag" The default is 1000 ms ''' if not six.text_type(tag): # no empty tags allowed raise ValueError('Empty tag.') if not isinstance(data, MutableMapping): # data must be dict raise ValueError( 'Dict object expected, not \'{0}\'.'.format(data) ) if not self.cpush: if timeout is not None: timeout_s = float(timeout) / 1000 else: timeout_s = None if not self.connect_pull(timeout=timeout_s): return False data['_stamp'] = datetime.datetime.utcnow().isoformat() tagend = TAGEND if six.PY2: dump_data = self.serial.dumps(data) else: # Since the pack / unpack logic here is for local events only, # it is safe to change the wire protocol. The mechanism # that sends events from minion to master is outside this # file. 
dump_data = self.serial.dumps(data, use_bin_type=True) serialized_data = salt.utils.dicttrim.trim_dict( dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3 ) log.debug('Sending event: tag = %s; data = %s', tag, data) event = b''.join([ salt.utils.stringutils.to_bytes(tag), salt.utils.stringutils.to_bytes(tagend), serialized_data]) msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.asynchronous.current_ioloop(self.io_loop): try: self.io_loop.run_sync(lambda: self.pusher.send(msg)) except Exception as ex: log.debug(ex) raise else: self.io_loop.spawn_callback(self.pusher.send, msg) return True def fire_master(self, data, tag, timeout=1000): '''' Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms ''' msg = { 'tag': tag, 'data': data, 'events': None, 'pretag': None } return self.fire_event(msg, "fire_master", timeout) def destroy(self): if self.subscriber is not None: self.subscriber.close() if self.pusher is not None: self.pusher.close() if self._run_io_loop_sync and not self.keep_loop: self.io_loop.close() def _fire_ret_load_specific_fun(self, load, fun_index=0): ''' Helper function for fire_ret_load ''' if isinstance(load['fun'], list): # Multi-function job fun = load['fun'][fun_index] # 'retcode' was already validated to exist and be non-zero # for the given function in the caller. 
if isinstance(load['retcode'], list): # Multi-function ordered ret = load.get('return') if isinstance(ret, list) and len(ret) > fun_index: ret = ret[fun_index] else: ret = {} retcode = load['retcode'][fun_index] else: ret = load.get('return', {}) ret = ret.get(fun, {}) retcode = load['retcode'][fun] else: # Single-function job fun = load['fun'] ret = load.get('return', {}) retcode = load['retcode'] try: for tag, data in six.iteritems(ret): data['retcode'] = retcode tags = tag.split('_|-') if data.get('result') is False: self.fire_event( data, '{0}.{1}'.format(tags[0], tags[-1]) ) # old dup event data['jid'] = load['jid'] data['id'] = load['id'] data['success'] = False data['return'] = 'Error: {0}.{1}'.format( tags[0], tags[-1]) data['fun'] = fun data['user'] = load['user'] self.fire_event( data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) except Exception: pass def fire_ret_load(self, load): ''' Fire events based on information in the return load ''' if load.get('retcode') and load.get('fun'): if isinstance(load['fun'], list): # Multi-function job if isinstance(load['retcode'], list): multifunc_ordered = True else: multifunc_ordered = False for fun_index in range(0, len(load['fun'])): fun = load['fun'][fun_index] if multifunc_ordered: if (len(load['retcode']) > fun_index and load['retcode'][fun_index] and fun in SUB_EVENT): # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: if load['retcode'].get(fun, 0) and fun in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load, fun_index) else: # Single-function job if load['fun'] in SUB_EVENT: # Minion fired a bad retcode, fire an event self._fire_ret_load_specific_fun(load) def remove_event_handler(self, event_handler): if event_handler in self.subscriber.callbacks: self.subscriber.callbacks.remove(event_handler) def __del__(self): # skip exceptions in destroy-- since destroy() doesn't cover interpreter # shutdown-- 
where globals start going missing try: self.destroy() except Exception: pass
saltstack/salt
salt/utils/event.py
EventPublisher.run
python
def run(self): ''' Bind the pub and pull sockets for events ''' salt.utils.process.appendproctitle(self.__class__.__name__) self.io_loop = tornado.ioloop.IOLoop() with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.opts['ipc_mode'] == 'tcp': epub_uri = int(self.opts['tcp_master_pub_port']) epull_uri = int(self.opts['tcp_master_pull_port']) else: epub_uri = os.path.join( self.opts['sock_dir'], 'master_event_pub.ipc' ) epull_uri = os.path.join( self.opts['sock_dir'], 'master_event_pull.ipc' ) self.publisher = salt.transport.ipc.IPCMessagePublisher( self.opts, epub_uri, io_loop=self.io_loop ) self.puller = salt.transport.ipc.IPCMessageServer( self.opts, epull_uri, io_loop=self.io_loop, payload_handler=self.handle_publish, ) # Start the master event publisher with salt.utils.files.set_umask(0o177): self.publisher.start() self.puller.start() if (self.opts['ipc_mode'] != 'tcp' and ( self.opts['publisher_acl'] or self.opts['external_auth'])): os.chmod(os.path.join( self.opts['sock_dir'], 'master_event_pub.ipc'), 0o666) # Make sure the IO loop and respective sockets are closed and # destroyed Finalize(self, self.close, exitpriority=15) self.io_loop.start()
Bind the pub and pull sockets for events
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L1100-L1147
[ "def appendproctitle(name):\n '''\n Append \"name\" to the current process title\n '''\n if HAS_SETPROCTITLE:\n setproctitle.setproctitle(setproctitle.getproctitle() + ' ' + name)\n", "def start(self):\n '''\n Perform the work necessary to start up a Tornado IPC server\n\n Blocks until socket is established\n '''\n # Start up the ioloop\n log.trace('IPCServer: binding to socket: %s', self.socket_path)\n if isinstance(self.socket_path, int):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n if self.opts.get('ipc_so_sndbuf'):\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.opts['ipc_so_sndbuf'])\n if self.opts.get('ipc_so_rcvbuf'):\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.opts['ipc_so_rcvbuf'])\n self.sock.setblocking(0)\n self.sock.bind(('127.0.0.1', self.socket_path))\n # Based on default used in tornado.netutil.bind_sockets()\n self.sock.listen(self.opts['ipc_so_backlog'])\n else:\n # sndbuf/rcvbuf does not apply to unix sockets\n self.sock = tornado.netutil.bind_unix_socket(self.socket_path, backlog=self.opts['ipc_so_backlog'])\n\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n tornado.netutil.add_accept_handler(\n self.sock,\n self.handle_connection,\n )\n self._started = True\n", "def start(self):\n '''\n Perform the work necessary to start up a Tornado IPC server\n\n Blocks until socket is established\n '''\n # Start up the ioloop\n log.trace('IPCMessagePublisher: binding to socket: %s', self.socket_path)\n if isinstance(self.socket_path, int):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n if self.opts.get('ipc_so_sndbuf'):\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.opts['ipc_so_sndbuf'])\n if self.opts.get('ipc_so_rcvbuf'):\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.opts['ipc_so_rcvbuf'])\n 
self.sock.setblocking(0)\n self.sock.bind(('127.0.0.1', self.socket_path))\n # Based on default used in tornado.netutil.bind_sockets()\n self.sock.listen(self.opts['ipc_so_backlog'])\n else:\n # sndbuf/rcvbuf does not apply to unix sockets\n self.sock = tornado.netutil.bind_unix_socket(self.socket_path, backlog=self.opts['ipc_so_backlog'])\n\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n tornado.netutil.add_accept_handler(\n self.sock,\n self.handle_connection,\n )\n self._started = True\n" ]
class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' The interface that takes master events and republishes them out to anyone who wants to listen ''' def __init__(self, opts, **kwargs): super(EventPublisher, self).__init__(**kwargs) self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() self.opts.update(opts) self._closing = False # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'opts': self.opts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def handle_publish(self, package, _): ''' Get something from epull, publish it out epub, and return the package (or None) ''' try: self.publisher.publish(package) return package # Add an extra fallback in case a forked process leeks through except Exception: log.critical('Unexpected error while polling master events', exc_info=True) return None def close(self): if self._closing: return self._closing = True if hasattr(self, 'publisher'): self.publisher.close() if hasattr(self, 'puller'): self.puller.close() if hasattr(self, 'io_loop'): self.io_loop.close() def _handle_signals(self, signum, sigframe): self.close() super(EventPublisher, self)._handle_signals(signum, sigframe) def __del__(self): self.close()
saltstack/salt
salt/utils/event.py
EventPublisher.handle_publish
python
def handle_publish(self, package, _): ''' Get something from epull, publish it out epub, and return the package (or None) ''' try: self.publisher.publish(package) return package # Add an extra fallback in case a forked process leeks through except Exception: log.critical('Unexpected error while polling master events', exc_info=True) return None
Get something from epull, publish it out epub, and return the package (or None)
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L1149-L1160
null
class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' The interface that takes master events and republishes them out to anyone who wants to listen ''' def __init__(self, opts, **kwargs): super(EventPublisher, self).__init__(**kwargs) self.opts = salt.config.DEFAULT_MASTER_OPTS.copy() self.opts.update(opts) self._closing = False # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'opts': self.opts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def run(self): ''' Bind the pub and pull sockets for events ''' salt.utils.process.appendproctitle(self.__class__.__name__) self.io_loop = tornado.ioloop.IOLoop() with salt.utils.asynchronous.current_ioloop(self.io_loop): if self.opts['ipc_mode'] == 'tcp': epub_uri = int(self.opts['tcp_master_pub_port']) epull_uri = int(self.opts['tcp_master_pull_port']) else: epub_uri = os.path.join( self.opts['sock_dir'], 'master_event_pub.ipc' ) epull_uri = os.path.join( self.opts['sock_dir'], 'master_event_pull.ipc' ) self.publisher = salt.transport.ipc.IPCMessagePublisher( self.opts, epub_uri, io_loop=self.io_loop ) self.puller = salt.transport.ipc.IPCMessageServer( self.opts, epull_uri, io_loop=self.io_loop, payload_handler=self.handle_publish, ) # Start the master event publisher with salt.utils.files.set_umask(0o177): self.publisher.start() self.puller.start() if (self.opts['ipc_mode'] != 'tcp' and ( self.opts['publisher_acl'] or self.opts['external_auth'])): os.chmod(os.path.join( self.opts['sock_dir'], 'master_event_pub.ipc'), 0o666) # Make sure the IO loop and respective sockets are closed and # destroyed Finalize(self, self.close, 
exitpriority=15) self.io_loop.start() def close(self): if self._closing: return self._closing = True if hasattr(self, 'publisher'): self.publisher.close() if hasattr(self, 'puller'): self.puller.close() if hasattr(self, 'io_loop'): self.io_loop.close() def _handle_signals(self, signum, sigframe): self.close() super(EventPublisher, self)._handle_signals(signum, sigframe) def __del__(self): self.close()
saltstack/salt
salt/utils/event.py
EventReturn.run
python
def run(self): ''' Spin up the multiprocess event returner ''' salt.utils.process.appendproctitle(self.__class__.__name__) self.event = get_event('master', opts=self.opts, listen=True) events = self.event.iter_events(full=True) self.event.fire_event({}, 'salt/event_listen/start') try: for event in events: if event['tag'] == 'salt/event/exit': self.stop = True if self._filter(event): self.event_queue.append(event) if len(self.event_queue) >= self.event_return_queue: self.flush_events() if self.stop: break finally: # flush all we have at this moment if self.event_queue: self.flush_events()
Spin up the multiprocess event returner
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L1270-L1290
[ "def get_event(\n node, sock_dir=None, transport='zeromq',\n opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False):\n '''\n Return an event object suitable for the named transport\n\n :param IOLoop io_loop: Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n sock_dir = sock_dir or opts['sock_dir']\n # TODO: AIO core is separate from transport\n if node == 'master':\n return MasterEvent(sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n return SaltEvent(node,\n sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n", "def appendproctitle(name):\n '''\n Append \"name\" to the current process title\n '''\n if HAS_SETPROCTITLE:\n setproctitle.setproctitle(setproctitle.getproctitle() + ' ' + name)\n" ]
class EventReturn(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' A dedicated process which listens to the master event bus and queues and forwards events to the specified returner. ''' def __new__(cls, *args, **kwargs): if sys.platform.startswith('win'): # This is required for Windows. On Linux, when a process is # forked, the module namespace is copied and the current process # gets all of sys.modules from where the fork happens. This is not # the case for Windows. import salt.minion # pylint: disable=unused-import instance = super(EventReturn, cls).__new__(cls, *args, **kwargs) return instance def __init__(self, opts, **kwargs): ''' Initialize the EventReturn system Return an EventReturn instance ''' super(EventReturn, self).__init__(**kwargs) self.opts = opts self.event_return_queue = self.opts['event_return_queue'] local_minion_opts = self.opts.copy() local_minion_opts['file_client'] = 'local' self.minion = salt.minion.MasterMinion(local_minion_opts) self.event_queue = [] self.stop = False # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. 
def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'opts': self.opts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def _handle_signals(self, signum, sigframe): # Flush and terminate if self.event_queue: self.flush_events() self.stop = True super(EventReturn, self)._handle_signals(signum, sigframe) def flush_events(self): if isinstance(self.opts['event_return'], list): # Multiple event returners for r in self.opts['event_return']: log.debug('Calling event returner %s, one of many.', r) event_return = '{0}.event_return'.format(r) self._flush_event_single(event_return) else: # Only a single event returner log.debug('Calling event returner %s, only one configured.', self.opts['event_return']) event_return = '{0}.event_return'.format( self.opts['event_return'] ) self._flush_event_single(event_return) del self.event_queue[:] def _flush_event_single(self, event_return): if event_return in self.minion.returners: try: self.minion.returners[event_return](self.event_queue) except Exception as exc: log.error('Could not store events - returner \'%s\' raised ' 'exception: %s', event_return, exc) # don't waste processing power unnecessarily on converting a # potentially huge dataset to a string if log.level <= logging.DEBUG: log.debug('Event data that caused an exception: %s', self.event_queue) else: log.error('Could not store return for event(s) - returner ' '\'%s\' not found.', event_return) def _filter(self, event): ''' Take an event and run it through configured filters. 
Returns True if event should be stored, else False ''' tag = event['tag'] if self.opts['event_return_whitelist']: ret = False else: ret = True for whitelist_match in self.opts['event_return_whitelist']: if fnmatch.fnmatch(tag, whitelist_match): ret = True break for blacklist_match in self.opts['event_return_blacklist']: if fnmatch.fnmatch(tag, blacklist_match): ret = False break return ret
saltstack/salt
salt/utils/event.py
EventReturn._filter
python
def _filter(self, event): ''' Take an event and run it through configured filters. Returns True if event should be stored, else False ''' tag = event['tag'] if self.opts['event_return_whitelist']: ret = False else: ret = True for whitelist_match in self.opts['event_return_whitelist']: if fnmatch.fnmatch(tag, whitelist_match): ret = True break for blacklist_match in self.opts['event_return_blacklist']: if fnmatch.fnmatch(tag, blacklist_match): ret = False break return ret
Take an event and run it through configured filters. Returns True if event should be stored, else False
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L1292-L1311
null
class EventReturn(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' A dedicated process which listens to the master event bus and queues and forwards events to the specified returner. ''' def __new__(cls, *args, **kwargs): if sys.platform.startswith('win'): # This is required for Windows. On Linux, when a process is # forked, the module namespace is copied and the current process # gets all of sys.modules from where the fork happens. This is not # the case for Windows. import salt.minion # pylint: disable=unused-import instance = super(EventReturn, cls).__new__(cls, *args, **kwargs) return instance def __init__(self, opts, **kwargs): ''' Initialize the EventReturn system Return an EventReturn instance ''' super(EventReturn, self).__init__(**kwargs) self.opts = opts self.event_return_queue = self.opts['event_return_queue'] local_minion_opts = self.opts.copy() local_minion_opts['file_client'] = 'local' self.minion = salt.minion.MasterMinion(local_minion_opts) self.event_queue = [] self.stop = False # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. 
def __setstate__(self, state): self._is_child = True self.__init__( state['opts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'opts': self.opts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def _handle_signals(self, signum, sigframe): # Flush and terminate if self.event_queue: self.flush_events() self.stop = True super(EventReturn, self)._handle_signals(signum, sigframe) def flush_events(self): if isinstance(self.opts['event_return'], list): # Multiple event returners for r in self.opts['event_return']: log.debug('Calling event returner %s, one of many.', r) event_return = '{0}.event_return'.format(r) self._flush_event_single(event_return) else: # Only a single event returner log.debug('Calling event returner %s, only one configured.', self.opts['event_return']) event_return = '{0}.event_return'.format( self.opts['event_return'] ) self._flush_event_single(event_return) del self.event_queue[:] def _flush_event_single(self, event_return): if event_return in self.minion.returners: try: self.minion.returners[event_return](self.event_queue) except Exception as exc: log.error('Could not store events - returner \'%s\' raised ' 'exception: %s', event_return, exc) # don't waste processing power unnecessarily on converting a # potentially huge dataset to a string if log.level <= logging.DEBUG: log.debug('Event data that caused an exception: %s', self.event_queue) else: log.error('Could not store return for event(s) - returner ' '\'%s\' not found.', event_return) def run(self): ''' Spin up the multiprocess event returner ''' salt.utils.process.appendproctitle(self.__class__.__name__) self.event = get_event('master', opts=self.opts, listen=True) events = self.event.iter_events(full=True) self.event.fire_event({}, 'salt/event_listen/start') try: for event in events: if event['tag'] == 'salt/event/exit': self.stop = True if self._filter(event): self.event_queue.append(event) if 
len(self.event_queue) >= self.event_return_queue: self.flush_events() if self.stop: break finally: # flush all we have at this moment if self.event_queue: self.flush_events()
saltstack/salt
salt/utils/event.py
StateFire.fire_master
python
def fire_master(self, data, tag, preload=None): ''' Fire an event off on the master server CLI Example: .. code-block:: bash salt '*' event.fire_master 'stuff to be in the event' 'tag' ''' load = {} if preload: load.update(preload) load.update({ 'id': self.opts['id'], 'tag': tag, 'data': data, 'cmd': '_minion_event', 'tok': self.auth.gen_token(b'salt'), }) channel = salt.transport.client.ReqChannel.factory(self.opts) try: channel.send(load) except Exception: pass finally: channel.close() return True
Fire an event off on the master server CLI Example: .. code-block:: bash salt '*' event.fire_master 'stuff to be in the event' 'tag'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L1327-L1356
[ "def factory(opts, **kwargs):\n # All Sync interfaces are just wrappers around the Async ones\n sync = SyncWrapper(AsyncReqChannel.factory, (opts,), kwargs)\n return sync\n" ]
class StateFire(object): ''' Evaluate the data from a state run and fire events on the master and minion for each returned chunk that is not "green" This object is made to only run on a minion ''' def __init__(self, opts, auth=None): self.opts = opts if not auth: self.auth = salt.crypt.SAuth(self.opts) else: self.auth = auth def fire_running(self, running): ''' Pass in a state "running" dict, this is the return dict from a state call. The dict will be processed and fire events. By default yellows and reds fire events on the master and minion, but this can be configured. ''' load = {'id': self.opts['id'], 'events': [], 'cmd': '_minion_event'} for stag in sorted( running, key=lambda k: running[k].get('__run_num__', 0)): if running[stag]['result'] and not running[stag]['changes']: continue tag = 'state_{0}_{1}'.format( six.text_type(running[stag]['result']), 'True' if running[stag]['changes'] else 'False') load['events'].append({ 'tag': tag, 'data': running[stag], }) channel = salt.transport.client.ReqChannel.factory(self.opts) try: channel.send(load) except Exception: pass finally: channel.close() return True
saltstack/salt
salt/utils/event.py
StateFire.fire_running
python
def fire_running(self, running): ''' Pass in a state "running" dict, this is the return dict from a state call. The dict will be processed and fire events. By default yellows and reds fire events on the master and minion, but this can be configured. ''' load = {'id': self.opts['id'], 'events': [], 'cmd': '_minion_event'} for stag in sorted( running, key=lambda k: running[k].get('__run_num__', 0)): if running[stag]['result'] and not running[stag]['changes']: continue tag = 'state_{0}_{1}'.format( six.text_type(running[stag]['result']), 'True' if running[stag]['changes'] else 'False') load['events'].append({ 'tag': tag, 'data': running[stag], }) channel = salt.transport.client.ReqChannel.factory(self.opts) try: channel.send(load) except Exception: pass finally: channel.close() return True
Pass in a state "running" dict, this is the return dict from a state call. The dict will be processed and fire events. By default yellows and reds fire events on the master and minion, but this can be configured.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L1358-L1388
[ "def factory(opts, **kwargs):\n # All Sync interfaces are just wrappers around the Async ones\n sync = SyncWrapper(AsyncReqChannel.factory, (opts,), kwargs)\n return sync\n" ]
class StateFire(object): ''' Evaluate the data from a state run and fire events on the master and minion for each returned chunk that is not "green" This object is made to only run on a minion ''' def __init__(self, opts, auth=None): self.opts = opts if not auth: self.auth = salt.crypt.SAuth(self.opts) else: self.auth = auth def fire_master(self, data, tag, preload=None): ''' Fire an event off on the master server CLI Example: .. code-block:: bash salt '*' event.fire_master 'stuff to be in the event' 'tag' ''' load = {} if preload: load.update(preload) load.update({ 'id': self.opts['id'], 'tag': tag, 'data': data, 'cmd': '_minion_event', 'tok': self.auth.gen_token(b'salt'), }) channel = salt.transport.client.ReqChannel.factory(self.opts) try: channel.send(load) except Exception: pass finally: channel.close() return True
saltstack/salt
salt/modules/inspectlib/kiwiproc.py
KiwiExporter.load
python
def load(self, **descr): ''' Load data by keys. :param data: :return: ''' for obj, data in descr.items(): setattr(self._data, obj, data) return self
Load data by keys. :param data: :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/kiwiproc.py#L50-L60
null
class KiwiExporter(object): ''' Exports system description as Kiwi configuration. ''' def __init__(self, grains, format): self.__grains__ = grains self.format = format self._data = type('data', (), {}) self.name = None def export(self, name): ''' Export to the Kiwi config.xml as text. :return: ''' self.name = name root = self._create_doc() self._set_description(root) self._set_preferences(root) self._set_repositories(root) self._set_users(root) self._set_packages(root) return '\n'.join([line for line in minidom.parseString( etree.tostring(root, encoding='UTF-8', pretty_print=True)).toprettyxml(indent=" ").split("\n") if line.strip()]) def _get_package_manager(self): ''' Get package manager. :return: ''' ret = None if self.__grains__.get('os_family') in ('Kali', 'Debian'): ret = 'apt-get' elif self.__grains__.get('os_family', '') == 'Suse': ret = 'zypper' elif self.__grains__.get('os_family', '') == 'redhat': ret = 'yum' if ret is None: raise InspectorKiwiProcessorException('Unsupported platform: {0}'.format(self.__grains__.get('os_family'))) return ret def _set_preferences(self, node): ''' Set preferences. :return: ''' pref = etree.SubElement(node, 'preferences') pacman = etree.SubElement(pref, 'packagemanager') pacman.text = self._get_package_manager() p_version = etree.SubElement(pref, 'version') p_version.text = '0.0.1' p_type = etree.SubElement(pref, 'type') p_type.set('image', 'vmx') for disk_id, disk_data in self._data.system.get('disks', {}).items(): if disk_id.startswith('/dev'): p_type.set('filesystem', disk_data.get('type') or 'ext3') break p_type.set('installiso', 'true') p_type.set('boot', "vmxboot/suse-leap42.1") p_type.set('format', self.format) p_type.set('bootloader', 'grub2') p_type.set('timezone', __salt__['timezone.get_zone']()) p_type.set('hwclock', __salt__['timezone.get_hwclock']()) return pref def _get_user_groups(self, user): ''' Get user groups. 
:param user: :return: ''' return [g.gr_name for g in grp.getgrall() if user in g.gr_mem] + [grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name] def _set_users(self, node): ''' Create existing local users. <users group="root"> <user password="$1$wYJUgpM5$RXMMeASDc035eX.NbYWFl0" home="/root" name="root"/> </users> :param node: :return: ''' # Get real local users with the local passwords shadow = {} with salt.utils.files.fopen('/etc/shadow') as rfh: for sh_line in rfh.read().split(os.linesep): if sh_line.strip(): login, pwd = sh_line.split(":")[:2] if pwd and pwd[0] not in '!*': shadow[login] = {'p': pwd} with salt.utils.files.fopen('/etc/passwd') as rfh: for ps_line in rfh.read().split(os.linesep): if ps_line.strip(): ps_line = ps_line.strip().split(':') if ps_line[0] in shadow: shadow[ps_line[0]]['h'] = ps_line[5] shadow[ps_line[0]]['s'] = ps_line[6] shadow[ps_line[0]]['g'] = self._get_user_groups(ps_line[0]) users_groups = [] users_node = etree.SubElement(node, 'users') for u_name, u_data in shadow.items(): user_node = etree.SubElement(users_node, 'user') user_node.set('password', u_data['p']) user_node.set('home', u_data['h']) user_node.set('name', u_name) users_groups.extend(u_data['g']) users_node.set('group', ','.join(users_groups)) return users_node def _set_repositories(self, node): ''' Create repositories. :param node: :return: ''' priority = 99 for repo_id, repo_data in self._data.software.get('repositories', {}).items(): if type(repo_data) == list: repo_data = repo_data[0] if repo_data.get('enabled') or not repo_data.get('disabled'): # RPM and Debian, respectively uri = repo_data.get('baseurl', repo_data.get('uri')) if not uri: continue repo = etree.SubElement(node, 'repository') if self.__grains__.get('os_family') in ('Kali', 'Debian'): repo.set('alias', repo_id) repo.set('distribution', repo_data['dist']) else: repo.set('alias', repo_data['alias']) if self.__grains__.get('os_family', '') == 'Suse': repo.set('type', 'yast2') # TODO: Check for options! 
repo.set('priority', str(priority)) source = etree.SubElement(repo, 'source') source.set('path', uri) # RPM and Debian, respectively priority -= 1 def _set_packages(self, node): ''' Set packages and collections. :param node: :return: ''' pkgs = etree.SubElement(node, 'packages') for pkg_name, pkg_version in sorted(self._data.software.get('packages', {}).items()): pkg = etree.SubElement(pkgs, 'package') pkg.set('name', pkg_name) # Add collections (SUSE) if self.__grains__.get('os_family', '') == 'Suse': for ptn_id, ptn_data in self._data.software.get('patterns', {}).items(): if ptn_data.get('installed'): ptn = etree.SubElement(pkgs, 'namedCollection') ptn.set('name', ptn_id) return pkgs def _set_description(self, node): ''' Create a system description. :return: ''' hostname = socket.getfqdn() or platform.node() descr = etree.SubElement(node, 'description') author = etree.SubElement(descr, 'author') author.text = "salt.modules.node on {0}".format(hostname) contact = etree.SubElement(descr, 'contact') contact.text = 'root@{0}'.format(hostname) specs = etree.SubElement(descr, 'specification') specs.text = 'Rebuild of {0}, based on Salt inspection.'.format(hostname) return descr def _create_doc(self): ''' Create document. :return: ''' root = etree.Element('image') root.set('schemaversion', '6.3') root.set('name', self.name) return root
saltstack/salt
salt/modules/inspectlib/kiwiproc.py
KiwiExporter.export
python
def export(self, name): ''' Export to the Kiwi config.xml as text. :return: ''' self.name = name root = self._create_doc() self._set_description(root) self._set_preferences(root) self._set_repositories(root) self._set_users(root) self._set_packages(root) return '\n'.join([line for line in minidom.parseString( etree.tostring(root, encoding='UTF-8', pretty_print=True)).toprettyxml(indent=" ").split("\n") if line.strip()])
Export to the Kiwi config.xml as text. :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/kiwiproc.py#L62-L79
[ "def _set_preferences(self, node):\n '''\n Set preferences.\n\n :return:\n '''\n pref = etree.SubElement(node, 'preferences')\n pacman = etree.SubElement(pref, 'packagemanager')\n pacman.text = self._get_package_manager()\n p_version = etree.SubElement(pref, 'version')\n p_version.text = '0.0.1'\n p_type = etree.SubElement(pref, 'type')\n p_type.set('image', 'vmx')\n\n for disk_id, disk_data in self._data.system.get('disks', {}).items():\n if disk_id.startswith('/dev'):\n p_type.set('filesystem', disk_data.get('type') or 'ext3')\n break\n\n p_type.set('installiso', 'true')\n p_type.set('boot', \"vmxboot/suse-leap42.1\")\n p_type.set('format', self.format)\n p_type.set('bootloader', 'grub2')\n p_type.set('timezone', __salt__['timezone.get_zone']())\n p_type.set('hwclock', __salt__['timezone.get_hwclock']())\n\n return pref\n", "def _set_users(self, node):\n '''\n Create existing local users.\n\n <users group=\"root\">\n <user password=\"$1$wYJUgpM5$RXMMeASDc035eX.NbYWFl0\" home=\"/root\" name=\"root\"/>\n </users>\n\n :param node:\n :return:\n '''\n # Get real local users with the local passwords\n shadow = {}\n with salt.utils.files.fopen('/etc/shadow') as rfh:\n for sh_line in rfh.read().split(os.linesep):\n if sh_line.strip():\n login, pwd = sh_line.split(\":\")[:2]\n if pwd and pwd[0] not in '!*':\n shadow[login] = {'p': pwd}\n\n with salt.utils.files.fopen('/etc/passwd') as rfh:\n for ps_line in rfh.read().split(os.linesep):\n if ps_line.strip():\n ps_line = ps_line.strip().split(':')\n if ps_line[0] in shadow:\n shadow[ps_line[0]]['h'] = ps_line[5]\n shadow[ps_line[0]]['s'] = ps_line[6]\n shadow[ps_line[0]]['g'] = self._get_user_groups(ps_line[0])\n\n users_groups = []\n users_node = etree.SubElement(node, 'users')\n for u_name, u_data in shadow.items():\n user_node = etree.SubElement(users_node, 'user')\n user_node.set('password', u_data['p'])\n user_node.set('home', u_data['h'])\n user_node.set('name', u_name)\n users_groups.extend(u_data['g'])\n 
users_node.set('group', ','.join(users_groups))\n\n return users_node\n", "def _set_repositories(self, node):\n '''\n Create repositories.\n\n :param node:\n :return:\n '''\n priority = 99\n\n for repo_id, repo_data in self._data.software.get('repositories', {}).items():\n if type(repo_data) == list:\n repo_data = repo_data[0]\n if repo_data.get('enabled') or not repo_data.get('disabled'): # RPM and Debian, respectively\n uri = repo_data.get('baseurl', repo_data.get('uri'))\n if not uri:\n continue\n repo = etree.SubElement(node, 'repository')\n if self.__grains__.get('os_family') in ('Kali', 'Debian'):\n repo.set('alias', repo_id)\n repo.set('distribution', repo_data['dist'])\n else:\n repo.set('alias', repo_data['alias'])\n if self.__grains__.get('os_family', '') == 'Suse':\n repo.set('type', 'yast2') # TODO: Check for options!\n repo.set('priority', str(priority))\n source = etree.SubElement(repo, 'source')\n source.set('path', uri) # RPM and Debian, respectively\n priority -= 1\n", "def _set_packages(self, node):\n '''\n Set packages and collections.\n\n :param node:\n :return:\n '''\n pkgs = etree.SubElement(node, 'packages')\n for pkg_name, pkg_version in sorted(self._data.software.get('packages', {}).items()):\n pkg = etree.SubElement(pkgs, 'package')\n pkg.set('name', pkg_name)\n\n # Add collections (SUSE)\n if self.__grains__.get('os_family', '') == 'Suse':\n for ptn_id, ptn_data in self._data.software.get('patterns', {}).items():\n if ptn_data.get('installed'):\n ptn = etree.SubElement(pkgs, 'namedCollection')\n ptn.set('name', ptn_id)\n\n return pkgs\n", "def _set_description(self, node):\n '''\n Create a system description.\n\n :return:\n '''\n hostname = socket.getfqdn() or platform.node()\n\n descr = etree.SubElement(node, 'description')\n author = etree.SubElement(descr, 'author')\n author.text = \"salt.modules.node on {0}\".format(hostname)\n contact = etree.SubElement(descr, 'contact')\n contact.text = 'root@{0}'.format(hostname)\n specs = 
etree.SubElement(descr, 'specification')\n specs.text = 'Rebuild of {0}, based on Salt inspection.'.format(hostname)\n\n return descr\n", "def _create_doc(self):\n '''\n Create document.\n\n :return:\n '''\n root = etree.Element('image')\n root.set('schemaversion', '6.3')\n root.set('name', self.name)\n\n return root\n" ]
class KiwiExporter(object): ''' Exports system description as Kiwi configuration. ''' def __init__(self, grains, format): self.__grains__ = grains self.format = format self._data = type('data', (), {}) self.name = None def load(self, **descr): ''' Load data by keys. :param data: :return: ''' for obj, data in descr.items(): setattr(self._data, obj, data) return self def _get_package_manager(self): ''' Get package manager. :return: ''' ret = None if self.__grains__.get('os_family') in ('Kali', 'Debian'): ret = 'apt-get' elif self.__grains__.get('os_family', '') == 'Suse': ret = 'zypper' elif self.__grains__.get('os_family', '') == 'redhat': ret = 'yum' if ret is None: raise InspectorKiwiProcessorException('Unsupported platform: {0}'.format(self.__grains__.get('os_family'))) return ret def _set_preferences(self, node): ''' Set preferences. :return: ''' pref = etree.SubElement(node, 'preferences') pacman = etree.SubElement(pref, 'packagemanager') pacman.text = self._get_package_manager() p_version = etree.SubElement(pref, 'version') p_version.text = '0.0.1' p_type = etree.SubElement(pref, 'type') p_type.set('image', 'vmx') for disk_id, disk_data in self._data.system.get('disks', {}).items(): if disk_id.startswith('/dev'): p_type.set('filesystem', disk_data.get('type') or 'ext3') break p_type.set('installiso', 'true') p_type.set('boot', "vmxboot/suse-leap42.1") p_type.set('format', self.format) p_type.set('bootloader', 'grub2') p_type.set('timezone', __salt__['timezone.get_zone']()) p_type.set('hwclock', __salt__['timezone.get_hwclock']()) return pref def _get_user_groups(self, user): ''' Get user groups. :param user: :return: ''' return [g.gr_name for g in grp.getgrall() if user in g.gr_mem] + [grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name] def _set_users(self, node): ''' Create existing local users. 
<users group="root"> <user password="$1$wYJUgpM5$RXMMeASDc035eX.NbYWFl0" home="/root" name="root"/> </users> :param node: :return: ''' # Get real local users with the local passwords shadow = {} with salt.utils.files.fopen('/etc/shadow') as rfh: for sh_line in rfh.read().split(os.linesep): if sh_line.strip(): login, pwd = sh_line.split(":")[:2] if pwd and pwd[0] not in '!*': shadow[login] = {'p': pwd} with salt.utils.files.fopen('/etc/passwd') as rfh: for ps_line in rfh.read().split(os.linesep): if ps_line.strip(): ps_line = ps_line.strip().split(':') if ps_line[0] in shadow: shadow[ps_line[0]]['h'] = ps_line[5] shadow[ps_line[0]]['s'] = ps_line[6] shadow[ps_line[0]]['g'] = self._get_user_groups(ps_line[0]) users_groups = [] users_node = etree.SubElement(node, 'users') for u_name, u_data in shadow.items(): user_node = etree.SubElement(users_node, 'user') user_node.set('password', u_data['p']) user_node.set('home', u_data['h']) user_node.set('name', u_name) users_groups.extend(u_data['g']) users_node.set('group', ','.join(users_groups)) return users_node def _set_repositories(self, node): ''' Create repositories. :param node: :return: ''' priority = 99 for repo_id, repo_data in self._data.software.get('repositories', {}).items(): if type(repo_data) == list: repo_data = repo_data[0] if repo_data.get('enabled') or not repo_data.get('disabled'): # RPM and Debian, respectively uri = repo_data.get('baseurl', repo_data.get('uri')) if not uri: continue repo = etree.SubElement(node, 'repository') if self.__grains__.get('os_family') in ('Kali', 'Debian'): repo.set('alias', repo_id) repo.set('distribution', repo_data['dist']) else: repo.set('alias', repo_data['alias']) if self.__grains__.get('os_family', '') == 'Suse': repo.set('type', 'yast2') # TODO: Check for options! 
repo.set('priority', str(priority)) source = etree.SubElement(repo, 'source') source.set('path', uri) # RPM and Debian, respectively priority -= 1 def _set_packages(self, node): ''' Set packages and collections. :param node: :return: ''' pkgs = etree.SubElement(node, 'packages') for pkg_name, pkg_version in sorted(self._data.software.get('packages', {}).items()): pkg = etree.SubElement(pkgs, 'package') pkg.set('name', pkg_name) # Add collections (SUSE) if self.__grains__.get('os_family', '') == 'Suse': for ptn_id, ptn_data in self._data.software.get('patterns', {}).items(): if ptn_data.get('installed'): ptn = etree.SubElement(pkgs, 'namedCollection') ptn.set('name', ptn_id) return pkgs def _set_description(self, node): ''' Create a system description. :return: ''' hostname = socket.getfqdn() or platform.node() descr = etree.SubElement(node, 'description') author = etree.SubElement(descr, 'author') author.text = "salt.modules.node on {0}".format(hostname) contact = etree.SubElement(descr, 'contact') contact.text = 'root@{0}'.format(hostname) specs = etree.SubElement(descr, 'specification') specs.text = 'Rebuild of {0}, based on Salt inspection.'.format(hostname) return descr def _create_doc(self): ''' Create document. :return: ''' root = etree.Element('image') root.set('schemaversion', '6.3') root.set('name', self.name) return root
saltstack/salt
salt/modules/inspectlib/kiwiproc.py
KiwiExporter._get_package_manager
python
def _get_package_manager(self): ''' Get package manager. :return: ''' ret = None if self.__grains__.get('os_family') in ('Kali', 'Debian'): ret = 'apt-get' elif self.__grains__.get('os_family', '') == 'Suse': ret = 'zypper' elif self.__grains__.get('os_family', '') == 'redhat': ret = 'yum' if ret is None: raise InspectorKiwiProcessorException('Unsupported platform: {0}'.format(self.__grains__.get('os_family'))) return ret
Get package manager. :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/kiwiproc.py#L81-L98
null
class KiwiExporter(object): ''' Exports system description as Kiwi configuration. ''' def __init__(self, grains, format): self.__grains__ = grains self.format = format self._data = type('data', (), {}) self.name = None def load(self, **descr): ''' Load data by keys. :param data: :return: ''' for obj, data in descr.items(): setattr(self._data, obj, data) return self def export(self, name): ''' Export to the Kiwi config.xml as text. :return: ''' self.name = name root = self._create_doc() self._set_description(root) self._set_preferences(root) self._set_repositories(root) self._set_users(root) self._set_packages(root) return '\n'.join([line for line in minidom.parseString( etree.tostring(root, encoding='UTF-8', pretty_print=True)).toprettyxml(indent=" ").split("\n") if line.strip()]) def _set_preferences(self, node): ''' Set preferences. :return: ''' pref = etree.SubElement(node, 'preferences') pacman = etree.SubElement(pref, 'packagemanager') pacman.text = self._get_package_manager() p_version = etree.SubElement(pref, 'version') p_version.text = '0.0.1' p_type = etree.SubElement(pref, 'type') p_type.set('image', 'vmx') for disk_id, disk_data in self._data.system.get('disks', {}).items(): if disk_id.startswith('/dev'): p_type.set('filesystem', disk_data.get('type') or 'ext3') break p_type.set('installiso', 'true') p_type.set('boot', "vmxboot/suse-leap42.1") p_type.set('format', self.format) p_type.set('bootloader', 'grub2') p_type.set('timezone', __salt__['timezone.get_zone']()) p_type.set('hwclock', __salt__['timezone.get_hwclock']()) return pref def _get_user_groups(self, user): ''' Get user groups. :param user: :return: ''' return [g.gr_name for g in grp.getgrall() if user in g.gr_mem] + [grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name] def _set_users(self, node): ''' Create existing local users. 
<users group="root"> <user password="$1$wYJUgpM5$RXMMeASDc035eX.NbYWFl0" home="/root" name="root"/> </users> :param node: :return: ''' # Get real local users with the local passwords shadow = {} with salt.utils.files.fopen('/etc/shadow') as rfh: for sh_line in rfh.read().split(os.linesep): if sh_line.strip(): login, pwd = sh_line.split(":")[:2] if pwd and pwd[0] not in '!*': shadow[login] = {'p': pwd} with salt.utils.files.fopen('/etc/passwd') as rfh: for ps_line in rfh.read().split(os.linesep): if ps_line.strip(): ps_line = ps_line.strip().split(':') if ps_line[0] in shadow: shadow[ps_line[0]]['h'] = ps_line[5] shadow[ps_line[0]]['s'] = ps_line[6] shadow[ps_line[0]]['g'] = self._get_user_groups(ps_line[0]) users_groups = [] users_node = etree.SubElement(node, 'users') for u_name, u_data in shadow.items(): user_node = etree.SubElement(users_node, 'user') user_node.set('password', u_data['p']) user_node.set('home', u_data['h']) user_node.set('name', u_name) users_groups.extend(u_data['g']) users_node.set('group', ','.join(users_groups)) return users_node def _set_repositories(self, node): ''' Create repositories. :param node: :return: ''' priority = 99 for repo_id, repo_data in self._data.software.get('repositories', {}).items(): if type(repo_data) == list: repo_data = repo_data[0] if repo_data.get('enabled') or not repo_data.get('disabled'): # RPM and Debian, respectively uri = repo_data.get('baseurl', repo_data.get('uri')) if not uri: continue repo = etree.SubElement(node, 'repository') if self.__grains__.get('os_family') in ('Kali', 'Debian'): repo.set('alias', repo_id) repo.set('distribution', repo_data['dist']) else: repo.set('alias', repo_data['alias']) if self.__grains__.get('os_family', '') == 'Suse': repo.set('type', 'yast2') # TODO: Check for options! 
repo.set('priority', str(priority)) source = etree.SubElement(repo, 'source') source.set('path', uri) # RPM and Debian, respectively priority -= 1 def _set_packages(self, node): ''' Set packages and collections. :param node: :return: ''' pkgs = etree.SubElement(node, 'packages') for pkg_name, pkg_version in sorted(self._data.software.get('packages', {}).items()): pkg = etree.SubElement(pkgs, 'package') pkg.set('name', pkg_name) # Add collections (SUSE) if self.__grains__.get('os_family', '') == 'Suse': for ptn_id, ptn_data in self._data.software.get('patterns', {}).items(): if ptn_data.get('installed'): ptn = etree.SubElement(pkgs, 'namedCollection') ptn.set('name', ptn_id) return pkgs def _set_description(self, node): ''' Create a system description. :return: ''' hostname = socket.getfqdn() or platform.node() descr = etree.SubElement(node, 'description') author = etree.SubElement(descr, 'author') author.text = "salt.modules.node on {0}".format(hostname) contact = etree.SubElement(descr, 'contact') contact.text = 'root@{0}'.format(hostname) specs = etree.SubElement(descr, 'specification') specs.text = 'Rebuild of {0}, based on Salt inspection.'.format(hostname) return descr def _create_doc(self): ''' Create document. :return: ''' root = etree.Element('image') root.set('schemaversion', '6.3') root.set('name', self.name) return root
saltstack/salt
salt/modules/inspectlib/kiwiproc.py
KiwiExporter._set_preferences
python
def _set_preferences(self, node): ''' Set preferences. :return: ''' pref = etree.SubElement(node, 'preferences') pacman = etree.SubElement(pref, 'packagemanager') pacman.text = self._get_package_manager() p_version = etree.SubElement(pref, 'version') p_version.text = '0.0.1' p_type = etree.SubElement(pref, 'type') p_type.set('image', 'vmx') for disk_id, disk_data in self._data.system.get('disks', {}).items(): if disk_id.startswith('/dev'): p_type.set('filesystem', disk_data.get('type') or 'ext3') break p_type.set('installiso', 'true') p_type.set('boot', "vmxboot/suse-leap42.1") p_type.set('format', self.format) p_type.set('bootloader', 'grub2') p_type.set('timezone', __salt__['timezone.get_zone']()) p_type.set('hwclock', __salt__['timezone.get_hwclock']()) return pref
Set preferences. :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/kiwiproc.py#L100-L126
[ "def _get_package_manager(self):\n '''\n Get package manager.\n\n :return:\n '''\n ret = None\n if self.__grains__.get('os_family') in ('Kali', 'Debian'):\n ret = 'apt-get'\n elif self.__grains__.get('os_family', '') == 'Suse':\n ret = 'zypper'\n elif self.__grains__.get('os_family', '') == 'redhat':\n ret = 'yum'\n\n if ret is None:\n raise InspectorKiwiProcessorException('Unsupported platform: {0}'.format(self.__grains__.get('os_family')))\n\n return ret\n" ]
class KiwiExporter(object): ''' Exports system description as Kiwi configuration. ''' def __init__(self, grains, format): self.__grains__ = grains self.format = format self._data = type('data', (), {}) self.name = None def load(self, **descr): ''' Load data by keys. :param data: :return: ''' for obj, data in descr.items(): setattr(self._data, obj, data) return self def export(self, name): ''' Export to the Kiwi config.xml as text. :return: ''' self.name = name root = self._create_doc() self._set_description(root) self._set_preferences(root) self._set_repositories(root) self._set_users(root) self._set_packages(root) return '\n'.join([line for line in minidom.parseString( etree.tostring(root, encoding='UTF-8', pretty_print=True)).toprettyxml(indent=" ").split("\n") if line.strip()]) def _get_package_manager(self): ''' Get package manager. :return: ''' ret = None if self.__grains__.get('os_family') in ('Kali', 'Debian'): ret = 'apt-get' elif self.__grains__.get('os_family', '') == 'Suse': ret = 'zypper' elif self.__grains__.get('os_family', '') == 'redhat': ret = 'yum' if ret is None: raise InspectorKiwiProcessorException('Unsupported platform: {0}'.format(self.__grains__.get('os_family'))) return ret def _get_user_groups(self, user): ''' Get user groups. :param user: :return: ''' return [g.gr_name for g in grp.getgrall() if user in g.gr_mem] + [grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name] def _set_users(self, node): ''' Create existing local users. 
<users group="root"> <user password="$1$wYJUgpM5$RXMMeASDc035eX.NbYWFl0" home="/root" name="root"/> </users> :param node: :return: ''' # Get real local users with the local passwords shadow = {} with salt.utils.files.fopen('/etc/shadow') as rfh: for sh_line in rfh.read().split(os.linesep): if sh_line.strip(): login, pwd = sh_line.split(":")[:2] if pwd and pwd[0] not in '!*': shadow[login] = {'p': pwd} with salt.utils.files.fopen('/etc/passwd') as rfh: for ps_line in rfh.read().split(os.linesep): if ps_line.strip(): ps_line = ps_line.strip().split(':') if ps_line[0] in shadow: shadow[ps_line[0]]['h'] = ps_line[5] shadow[ps_line[0]]['s'] = ps_line[6] shadow[ps_line[0]]['g'] = self._get_user_groups(ps_line[0]) users_groups = [] users_node = etree.SubElement(node, 'users') for u_name, u_data in shadow.items(): user_node = etree.SubElement(users_node, 'user') user_node.set('password', u_data['p']) user_node.set('home', u_data['h']) user_node.set('name', u_name) users_groups.extend(u_data['g']) users_node.set('group', ','.join(users_groups)) return users_node def _set_repositories(self, node): ''' Create repositories. :param node: :return: ''' priority = 99 for repo_id, repo_data in self._data.software.get('repositories', {}).items(): if type(repo_data) == list: repo_data = repo_data[0] if repo_data.get('enabled') or not repo_data.get('disabled'): # RPM and Debian, respectively uri = repo_data.get('baseurl', repo_data.get('uri')) if not uri: continue repo = etree.SubElement(node, 'repository') if self.__grains__.get('os_family') in ('Kali', 'Debian'): repo.set('alias', repo_id) repo.set('distribution', repo_data['dist']) else: repo.set('alias', repo_data['alias']) if self.__grains__.get('os_family', '') == 'Suse': repo.set('type', 'yast2') # TODO: Check for options! 
repo.set('priority', str(priority)) source = etree.SubElement(repo, 'source') source.set('path', uri) # RPM and Debian, respectively priority -= 1 def _set_packages(self, node): ''' Set packages and collections. :param node: :return: ''' pkgs = etree.SubElement(node, 'packages') for pkg_name, pkg_version in sorted(self._data.software.get('packages', {}).items()): pkg = etree.SubElement(pkgs, 'package') pkg.set('name', pkg_name) # Add collections (SUSE) if self.__grains__.get('os_family', '') == 'Suse': for ptn_id, ptn_data in self._data.software.get('patterns', {}).items(): if ptn_data.get('installed'): ptn = etree.SubElement(pkgs, 'namedCollection') ptn.set('name', ptn_id) return pkgs def _set_description(self, node): ''' Create a system description. :return: ''' hostname = socket.getfqdn() or platform.node() descr = etree.SubElement(node, 'description') author = etree.SubElement(descr, 'author') author.text = "salt.modules.node on {0}".format(hostname) contact = etree.SubElement(descr, 'contact') contact.text = 'root@{0}'.format(hostname) specs = etree.SubElement(descr, 'specification') specs.text = 'Rebuild of {0}, based on Salt inspection.'.format(hostname) return descr def _create_doc(self): ''' Create document. :return: ''' root = etree.Element('image') root.set('schemaversion', '6.3') root.set('name', self.name) return root
saltstack/salt
salt/modules/inspectlib/kiwiproc.py
KiwiExporter._get_user_groups
python
def _get_user_groups(self, user): ''' Get user groups. :param user: :return: ''' return [g.gr_name for g in grp.getgrall() if user in g.gr_mem] + [grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name]
Get user groups. :param user: :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/kiwiproc.py#L128-L136
null
class KiwiExporter(object): ''' Exports system description as Kiwi configuration. ''' def __init__(self, grains, format): self.__grains__ = grains self.format = format self._data = type('data', (), {}) self.name = None def load(self, **descr): ''' Load data by keys. :param data: :return: ''' for obj, data in descr.items(): setattr(self._data, obj, data) return self def export(self, name): ''' Export to the Kiwi config.xml as text. :return: ''' self.name = name root = self._create_doc() self._set_description(root) self._set_preferences(root) self._set_repositories(root) self._set_users(root) self._set_packages(root) return '\n'.join([line for line in minidom.parseString( etree.tostring(root, encoding='UTF-8', pretty_print=True)).toprettyxml(indent=" ").split("\n") if line.strip()]) def _get_package_manager(self): ''' Get package manager. :return: ''' ret = None if self.__grains__.get('os_family') in ('Kali', 'Debian'): ret = 'apt-get' elif self.__grains__.get('os_family', '') == 'Suse': ret = 'zypper' elif self.__grains__.get('os_family', '') == 'redhat': ret = 'yum' if ret is None: raise InspectorKiwiProcessorException('Unsupported platform: {0}'.format(self.__grains__.get('os_family'))) return ret def _set_preferences(self, node): ''' Set preferences. 
:return: ''' pref = etree.SubElement(node, 'preferences') pacman = etree.SubElement(pref, 'packagemanager') pacman.text = self._get_package_manager() p_version = etree.SubElement(pref, 'version') p_version.text = '0.0.1' p_type = etree.SubElement(pref, 'type') p_type.set('image', 'vmx') for disk_id, disk_data in self._data.system.get('disks', {}).items(): if disk_id.startswith('/dev'): p_type.set('filesystem', disk_data.get('type') or 'ext3') break p_type.set('installiso', 'true') p_type.set('boot', "vmxboot/suse-leap42.1") p_type.set('format', self.format) p_type.set('bootloader', 'grub2') p_type.set('timezone', __salt__['timezone.get_zone']()) p_type.set('hwclock', __salt__['timezone.get_hwclock']()) return pref def _set_users(self, node): ''' Create existing local users. <users group="root"> <user password="$1$wYJUgpM5$RXMMeASDc035eX.NbYWFl0" home="/root" name="root"/> </users> :param node: :return: ''' # Get real local users with the local passwords shadow = {} with salt.utils.files.fopen('/etc/shadow') as rfh: for sh_line in rfh.read().split(os.linesep): if sh_line.strip(): login, pwd = sh_line.split(":")[:2] if pwd and pwd[0] not in '!*': shadow[login] = {'p': pwd} with salt.utils.files.fopen('/etc/passwd') as rfh: for ps_line in rfh.read().split(os.linesep): if ps_line.strip(): ps_line = ps_line.strip().split(':') if ps_line[0] in shadow: shadow[ps_line[0]]['h'] = ps_line[5] shadow[ps_line[0]]['s'] = ps_line[6] shadow[ps_line[0]]['g'] = self._get_user_groups(ps_line[0]) users_groups = [] users_node = etree.SubElement(node, 'users') for u_name, u_data in shadow.items(): user_node = etree.SubElement(users_node, 'user') user_node.set('password', u_data['p']) user_node.set('home', u_data['h']) user_node.set('name', u_name) users_groups.extend(u_data['g']) users_node.set('group', ','.join(users_groups)) return users_node def _set_repositories(self, node): ''' Create repositories. 
:param node: :return: ''' priority = 99 for repo_id, repo_data in self._data.software.get('repositories', {}).items(): if type(repo_data) == list: repo_data = repo_data[0] if repo_data.get('enabled') or not repo_data.get('disabled'): # RPM and Debian, respectively uri = repo_data.get('baseurl', repo_data.get('uri')) if not uri: continue repo = etree.SubElement(node, 'repository') if self.__grains__.get('os_family') in ('Kali', 'Debian'): repo.set('alias', repo_id) repo.set('distribution', repo_data['dist']) else: repo.set('alias', repo_data['alias']) if self.__grains__.get('os_family', '') == 'Suse': repo.set('type', 'yast2') # TODO: Check for options! repo.set('priority', str(priority)) source = etree.SubElement(repo, 'source') source.set('path', uri) # RPM and Debian, respectively priority -= 1 def _set_packages(self, node): ''' Set packages and collections. :param node: :return: ''' pkgs = etree.SubElement(node, 'packages') for pkg_name, pkg_version in sorted(self._data.software.get('packages', {}).items()): pkg = etree.SubElement(pkgs, 'package') pkg.set('name', pkg_name) # Add collections (SUSE) if self.__grains__.get('os_family', '') == 'Suse': for ptn_id, ptn_data in self._data.software.get('patterns', {}).items(): if ptn_data.get('installed'): ptn = etree.SubElement(pkgs, 'namedCollection') ptn.set('name', ptn_id) return pkgs def _set_description(self, node): ''' Create a system description. :return: ''' hostname = socket.getfqdn() or platform.node() descr = etree.SubElement(node, 'description') author = etree.SubElement(descr, 'author') author.text = "salt.modules.node on {0}".format(hostname) contact = etree.SubElement(descr, 'contact') contact.text = 'root@{0}'.format(hostname) specs = etree.SubElement(descr, 'specification') specs.text = 'Rebuild of {0}, based on Salt inspection.'.format(hostname) return descr def _create_doc(self): ''' Create document. 
:return: ''' root = etree.Element('image') root.set('schemaversion', '6.3') root.set('name', self.name) return root
saltstack/salt
salt/modules/inspectlib/kiwiproc.py
KiwiExporter._set_users
python
def _set_users(self, node): ''' Create existing local users. <users group="root"> <user password="$1$wYJUgpM5$RXMMeASDc035eX.NbYWFl0" home="/root" name="root"/> </users> :param node: :return: ''' # Get real local users with the local passwords shadow = {} with salt.utils.files.fopen('/etc/shadow') as rfh: for sh_line in rfh.read().split(os.linesep): if sh_line.strip(): login, pwd = sh_line.split(":")[:2] if pwd and pwd[0] not in '!*': shadow[login] = {'p': pwd} with salt.utils.files.fopen('/etc/passwd') as rfh: for ps_line in rfh.read().split(os.linesep): if ps_line.strip(): ps_line = ps_line.strip().split(':') if ps_line[0] in shadow: shadow[ps_line[0]]['h'] = ps_line[5] shadow[ps_line[0]]['s'] = ps_line[6] shadow[ps_line[0]]['g'] = self._get_user_groups(ps_line[0]) users_groups = [] users_node = etree.SubElement(node, 'users') for u_name, u_data in shadow.items(): user_node = etree.SubElement(users_node, 'user') user_node.set('password', u_data['p']) user_node.set('home', u_data['h']) user_node.set('name', u_name) users_groups.extend(u_data['g']) users_node.set('group', ','.join(users_groups)) return users_node
Create existing local users. <users group="root"> <user password="$1$wYJUgpM5$RXMMeASDc035eX.NbYWFl0" home="/root" name="root"/> </users> :param node: :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/kiwiproc.py#L138-L177
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n" ]
class KiwiExporter(object): ''' Exports system description as Kiwi configuration. ''' def __init__(self, grains, format): self.__grains__ = grains self.format = format self._data = type('data', (), {}) self.name = None def load(self, **descr): ''' Load data by keys. :param data: :return: ''' for obj, data in descr.items(): setattr(self._data, obj, data) return self def export(self, name): ''' Export to the Kiwi config.xml as text. :return: ''' self.name = name root = self._create_doc() self._set_description(root) self._set_preferences(root) self._set_repositories(root) self._set_users(root) self._set_packages(root) return '\n'.join([line for line in minidom.parseString( etree.tostring(root, encoding='UTF-8', pretty_print=True)).toprettyxml(indent=" ").split("\n") if line.strip()]) def _get_package_manager(self): ''' Get package manager. :return: ''' ret = None if self.__grains__.get('os_family') in ('Kali', 'Debian'): ret = 'apt-get' elif self.__grains__.get('os_family', '') == 'Suse': ret = 'zypper' elif self.__grains__.get('os_family', '') == 'redhat': ret = 'yum' if ret is None: raise InspectorKiwiProcessorException('Unsupported platform: {0}'.format(self.__grains__.get('os_family'))) return ret def _set_preferences(self, node): ''' Set preferences. 
:return: ''' pref = etree.SubElement(node, 'preferences') pacman = etree.SubElement(pref, 'packagemanager') pacman.text = self._get_package_manager() p_version = etree.SubElement(pref, 'version') p_version.text = '0.0.1' p_type = etree.SubElement(pref, 'type') p_type.set('image', 'vmx') for disk_id, disk_data in self._data.system.get('disks', {}).items(): if disk_id.startswith('/dev'): p_type.set('filesystem', disk_data.get('type') or 'ext3') break p_type.set('installiso', 'true') p_type.set('boot', "vmxboot/suse-leap42.1") p_type.set('format', self.format) p_type.set('bootloader', 'grub2') p_type.set('timezone', __salt__['timezone.get_zone']()) p_type.set('hwclock', __salt__['timezone.get_hwclock']()) return pref def _get_user_groups(self, user): ''' Get user groups. :param user: :return: ''' return [g.gr_name for g in grp.getgrall() if user in g.gr_mem] + [grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name] def _set_repositories(self, node): ''' Create repositories. :param node: :return: ''' priority = 99 for repo_id, repo_data in self._data.software.get('repositories', {}).items(): if type(repo_data) == list: repo_data = repo_data[0] if repo_data.get('enabled') or not repo_data.get('disabled'): # RPM and Debian, respectively uri = repo_data.get('baseurl', repo_data.get('uri')) if not uri: continue repo = etree.SubElement(node, 'repository') if self.__grains__.get('os_family') in ('Kali', 'Debian'): repo.set('alias', repo_id) repo.set('distribution', repo_data['dist']) else: repo.set('alias', repo_data['alias']) if self.__grains__.get('os_family', '') == 'Suse': repo.set('type', 'yast2') # TODO: Check for options! repo.set('priority', str(priority)) source = etree.SubElement(repo, 'source') source.set('path', uri) # RPM and Debian, respectively priority -= 1 def _set_packages(self, node): ''' Set packages and collections. 
:param node: :return: ''' pkgs = etree.SubElement(node, 'packages') for pkg_name, pkg_version in sorted(self._data.software.get('packages', {}).items()): pkg = etree.SubElement(pkgs, 'package') pkg.set('name', pkg_name) # Add collections (SUSE) if self.__grains__.get('os_family', '') == 'Suse': for ptn_id, ptn_data in self._data.software.get('patterns', {}).items(): if ptn_data.get('installed'): ptn = etree.SubElement(pkgs, 'namedCollection') ptn.set('name', ptn_id) return pkgs def _set_description(self, node): ''' Create a system description. :return: ''' hostname = socket.getfqdn() or platform.node() descr = etree.SubElement(node, 'description') author = etree.SubElement(descr, 'author') author.text = "salt.modules.node on {0}".format(hostname) contact = etree.SubElement(descr, 'contact') contact.text = 'root@{0}'.format(hostname) specs = etree.SubElement(descr, 'specification') specs.text = 'Rebuild of {0}, based on Salt inspection.'.format(hostname) return descr def _create_doc(self): ''' Create document. :return: ''' root = etree.Element('image') root.set('schemaversion', '6.3') root.set('name', self.name) return root
saltstack/salt
salt/modules/inspectlib/kiwiproc.py
KiwiExporter._set_repositories
python
def _set_repositories(self, node): ''' Create repositories. :param node: :return: ''' priority = 99 for repo_id, repo_data in self._data.software.get('repositories', {}).items(): if type(repo_data) == list: repo_data = repo_data[0] if repo_data.get('enabled') or not repo_data.get('disabled'): # RPM and Debian, respectively uri = repo_data.get('baseurl', repo_data.get('uri')) if not uri: continue repo = etree.SubElement(node, 'repository') if self.__grains__.get('os_family') in ('Kali', 'Debian'): repo.set('alias', repo_id) repo.set('distribution', repo_data['dist']) else: repo.set('alias', repo_data['alias']) if self.__grains__.get('os_family', '') == 'Suse': repo.set('type', 'yast2') # TODO: Check for options! repo.set('priority', str(priority)) source = etree.SubElement(repo, 'source') source.set('path', uri) # RPM and Debian, respectively priority -= 1
Create repositories. :param node: :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/kiwiproc.py#L179-L206
null
class KiwiExporter(object): ''' Exports system description as Kiwi configuration. ''' def __init__(self, grains, format): self.__grains__ = grains self.format = format self._data = type('data', (), {}) self.name = None def load(self, **descr): ''' Load data by keys. :param data: :return: ''' for obj, data in descr.items(): setattr(self._data, obj, data) return self def export(self, name): ''' Export to the Kiwi config.xml as text. :return: ''' self.name = name root = self._create_doc() self._set_description(root) self._set_preferences(root) self._set_repositories(root) self._set_users(root) self._set_packages(root) return '\n'.join([line for line in minidom.parseString( etree.tostring(root, encoding='UTF-8', pretty_print=True)).toprettyxml(indent=" ").split("\n") if line.strip()]) def _get_package_manager(self): ''' Get package manager. :return: ''' ret = None if self.__grains__.get('os_family') in ('Kali', 'Debian'): ret = 'apt-get' elif self.__grains__.get('os_family', '') == 'Suse': ret = 'zypper' elif self.__grains__.get('os_family', '') == 'redhat': ret = 'yum' if ret is None: raise InspectorKiwiProcessorException('Unsupported platform: {0}'.format(self.__grains__.get('os_family'))) return ret def _set_preferences(self, node): ''' Set preferences. 
:return: ''' pref = etree.SubElement(node, 'preferences') pacman = etree.SubElement(pref, 'packagemanager') pacman.text = self._get_package_manager() p_version = etree.SubElement(pref, 'version') p_version.text = '0.0.1' p_type = etree.SubElement(pref, 'type') p_type.set('image', 'vmx') for disk_id, disk_data in self._data.system.get('disks', {}).items(): if disk_id.startswith('/dev'): p_type.set('filesystem', disk_data.get('type') or 'ext3') break p_type.set('installiso', 'true') p_type.set('boot', "vmxboot/suse-leap42.1") p_type.set('format', self.format) p_type.set('bootloader', 'grub2') p_type.set('timezone', __salt__['timezone.get_zone']()) p_type.set('hwclock', __salt__['timezone.get_hwclock']()) return pref def _get_user_groups(self, user): ''' Get user groups. :param user: :return: ''' return [g.gr_name for g in grp.getgrall() if user in g.gr_mem] + [grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name] def _set_users(self, node): ''' Create existing local users. <users group="root"> <user password="$1$wYJUgpM5$RXMMeASDc035eX.NbYWFl0" home="/root" name="root"/> </users> :param node: :return: ''' # Get real local users with the local passwords shadow = {} with salt.utils.files.fopen('/etc/shadow') as rfh: for sh_line in rfh.read().split(os.linesep): if sh_line.strip(): login, pwd = sh_line.split(":")[:2] if pwd and pwd[0] not in '!*': shadow[login] = {'p': pwd} with salt.utils.files.fopen('/etc/passwd') as rfh: for ps_line in rfh.read().split(os.linesep): if ps_line.strip(): ps_line = ps_line.strip().split(':') if ps_line[0] in shadow: shadow[ps_line[0]]['h'] = ps_line[5] shadow[ps_line[0]]['s'] = ps_line[6] shadow[ps_line[0]]['g'] = self._get_user_groups(ps_line[0]) users_groups = [] users_node = etree.SubElement(node, 'users') for u_name, u_data in shadow.items(): user_node = etree.SubElement(users_node, 'user') user_node.set('password', u_data['p']) user_node.set('home', u_data['h']) user_node.set('name', u_name) users_groups.extend(u_data['g']) 
users_node.set('group', ','.join(users_groups)) return users_node def _set_packages(self, node): ''' Set packages and collections. :param node: :return: ''' pkgs = etree.SubElement(node, 'packages') for pkg_name, pkg_version in sorted(self._data.software.get('packages', {}).items()): pkg = etree.SubElement(pkgs, 'package') pkg.set('name', pkg_name) # Add collections (SUSE) if self.__grains__.get('os_family', '') == 'Suse': for ptn_id, ptn_data in self._data.software.get('patterns', {}).items(): if ptn_data.get('installed'): ptn = etree.SubElement(pkgs, 'namedCollection') ptn.set('name', ptn_id) return pkgs def _set_description(self, node): ''' Create a system description. :return: ''' hostname = socket.getfqdn() or platform.node() descr = etree.SubElement(node, 'description') author = etree.SubElement(descr, 'author') author.text = "salt.modules.node on {0}".format(hostname) contact = etree.SubElement(descr, 'contact') contact.text = 'root@{0}'.format(hostname) specs = etree.SubElement(descr, 'specification') specs.text = 'Rebuild of {0}, based on Salt inspection.'.format(hostname) return descr def _create_doc(self): ''' Create document. :return: ''' root = etree.Element('image') root.set('schemaversion', '6.3') root.set('name', self.name) return root
saltstack/salt
salt/modules/inspectlib/kiwiproc.py
KiwiExporter._set_packages
python
def _set_packages(self, node): ''' Set packages and collections. :param node: :return: ''' pkgs = etree.SubElement(node, 'packages') for pkg_name, pkg_version in sorted(self._data.software.get('packages', {}).items()): pkg = etree.SubElement(pkgs, 'package') pkg.set('name', pkg_name) # Add collections (SUSE) if self.__grains__.get('os_family', '') == 'Suse': for ptn_id, ptn_data in self._data.software.get('patterns', {}).items(): if ptn_data.get('installed'): ptn = etree.SubElement(pkgs, 'namedCollection') ptn.set('name', ptn_id) return pkgs
Set packages and collections. :param node: :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/kiwiproc.py#L208-L227
null
class KiwiExporter(object): ''' Exports system description as Kiwi configuration. ''' def __init__(self, grains, format): self.__grains__ = grains self.format = format self._data = type('data', (), {}) self.name = None def load(self, **descr): ''' Load data by keys. :param data: :return: ''' for obj, data in descr.items(): setattr(self._data, obj, data) return self def export(self, name): ''' Export to the Kiwi config.xml as text. :return: ''' self.name = name root = self._create_doc() self._set_description(root) self._set_preferences(root) self._set_repositories(root) self._set_users(root) self._set_packages(root) return '\n'.join([line for line in minidom.parseString( etree.tostring(root, encoding='UTF-8', pretty_print=True)).toprettyxml(indent=" ").split("\n") if line.strip()]) def _get_package_manager(self): ''' Get package manager. :return: ''' ret = None if self.__grains__.get('os_family') in ('Kali', 'Debian'): ret = 'apt-get' elif self.__grains__.get('os_family', '') == 'Suse': ret = 'zypper' elif self.__grains__.get('os_family', '') == 'redhat': ret = 'yum' if ret is None: raise InspectorKiwiProcessorException('Unsupported platform: {0}'.format(self.__grains__.get('os_family'))) return ret def _set_preferences(self, node): ''' Set preferences. 
:return: ''' pref = etree.SubElement(node, 'preferences') pacman = etree.SubElement(pref, 'packagemanager') pacman.text = self._get_package_manager() p_version = etree.SubElement(pref, 'version') p_version.text = '0.0.1' p_type = etree.SubElement(pref, 'type') p_type.set('image', 'vmx') for disk_id, disk_data in self._data.system.get('disks', {}).items(): if disk_id.startswith('/dev'): p_type.set('filesystem', disk_data.get('type') or 'ext3') break p_type.set('installiso', 'true') p_type.set('boot', "vmxboot/suse-leap42.1") p_type.set('format', self.format) p_type.set('bootloader', 'grub2') p_type.set('timezone', __salt__['timezone.get_zone']()) p_type.set('hwclock', __salt__['timezone.get_hwclock']()) return pref def _get_user_groups(self, user): ''' Get user groups. :param user: :return: ''' return [g.gr_name for g in grp.getgrall() if user in g.gr_mem] + [grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name] def _set_users(self, node): ''' Create existing local users. <users group="root"> <user password="$1$wYJUgpM5$RXMMeASDc035eX.NbYWFl0" home="/root" name="root"/> </users> :param node: :return: ''' # Get real local users with the local passwords shadow = {} with salt.utils.files.fopen('/etc/shadow') as rfh: for sh_line in rfh.read().split(os.linesep): if sh_line.strip(): login, pwd = sh_line.split(":")[:2] if pwd and pwd[0] not in '!*': shadow[login] = {'p': pwd} with salt.utils.files.fopen('/etc/passwd') as rfh: for ps_line in rfh.read().split(os.linesep): if ps_line.strip(): ps_line = ps_line.strip().split(':') if ps_line[0] in shadow: shadow[ps_line[0]]['h'] = ps_line[5] shadow[ps_line[0]]['s'] = ps_line[6] shadow[ps_line[0]]['g'] = self._get_user_groups(ps_line[0]) users_groups = [] users_node = etree.SubElement(node, 'users') for u_name, u_data in shadow.items(): user_node = etree.SubElement(users_node, 'user') user_node.set('password', u_data['p']) user_node.set('home', u_data['h']) user_node.set('name', u_name) users_groups.extend(u_data['g']) 
users_node.set('group', ','.join(users_groups)) return users_node def _set_repositories(self, node): ''' Create repositories. :param node: :return: ''' priority = 99 for repo_id, repo_data in self._data.software.get('repositories', {}).items(): if type(repo_data) == list: repo_data = repo_data[0] if repo_data.get('enabled') or not repo_data.get('disabled'): # RPM and Debian, respectively uri = repo_data.get('baseurl', repo_data.get('uri')) if not uri: continue repo = etree.SubElement(node, 'repository') if self.__grains__.get('os_family') in ('Kali', 'Debian'): repo.set('alias', repo_id) repo.set('distribution', repo_data['dist']) else: repo.set('alias', repo_data['alias']) if self.__grains__.get('os_family', '') == 'Suse': repo.set('type', 'yast2') # TODO: Check for options! repo.set('priority', str(priority)) source = etree.SubElement(repo, 'source') source.set('path', uri) # RPM and Debian, respectively priority -= 1 def _set_description(self, node): ''' Create a system description. :return: ''' hostname = socket.getfqdn() or platform.node() descr = etree.SubElement(node, 'description') author = etree.SubElement(descr, 'author') author.text = "salt.modules.node on {0}".format(hostname) contact = etree.SubElement(descr, 'contact') contact.text = 'root@{0}'.format(hostname) specs = etree.SubElement(descr, 'specification') specs.text = 'Rebuild of {0}, based on Salt inspection.'.format(hostname) return descr def _create_doc(self): ''' Create document. :return: ''' root = etree.Element('image') root.set('schemaversion', '6.3') root.set('name', self.name) return root
saltstack/salt
salt/modules/inspectlib/kiwiproc.py
KiwiExporter._set_description
python
def _set_description(self, node): ''' Create a system description. :return: ''' hostname = socket.getfqdn() or platform.node() descr = etree.SubElement(node, 'description') author = etree.SubElement(descr, 'author') author.text = "salt.modules.node on {0}".format(hostname) contact = etree.SubElement(descr, 'contact') contact.text = 'root@{0}'.format(hostname) specs = etree.SubElement(descr, 'specification') specs.text = 'Rebuild of {0}, based on Salt inspection.'.format(hostname) return descr
Create a system description. :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/kiwiproc.py#L229-L245
null
class KiwiExporter(object): ''' Exports system description as Kiwi configuration. ''' def __init__(self, grains, format): self.__grains__ = grains self.format = format self._data = type('data', (), {}) self.name = None def load(self, **descr): ''' Load data by keys. :param data: :return: ''' for obj, data in descr.items(): setattr(self._data, obj, data) return self def export(self, name): ''' Export to the Kiwi config.xml as text. :return: ''' self.name = name root = self._create_doc() self._set_description(root) self._set_preferences(root) self._set_repositories(root) self._set_users(root) self._set_packages(root) return '\n'.join([line for line in minidom.parseString( etree.tostring(root, encoding='UTF-8', pretty_print=True)).toprettyxml(indent=" ").split("\n") if line.strip()]) def _get_package_manager(self): ''' Get package manager. :return: ''' ret = None if self.__grains__.get('os_family') in ('Kali', 'Debian'): ret = 'apt-get' elif self.__grains__.get('os_family', '') == 'Suse': ret = 'zypper' elif self.__grains__.get('os_family', '') == 'redhat': ret = 'yum' if ret is None: raise InspectorKiwiProcessorException('Unsupported platform: {0}'.format(self.__grains__.get('os_family'))) return ret def _set_preferences(self, node): ''' Set preferences. 
:return: ''' pref = etree.SubElement(node, 'preferences') pacman = etree.SubElement(pref, 'packagemanager') pacman.text = self._get_package_manager() p_version = etree.SubElement(pref, 'version') p_version.text = '0.0.1' p_type = etree.SubElement(pref, 'type') p_type.set('image', 'vmx') for disk_id, disk_data in self._data.system.get('disks', {}).items(): if disk_id.startswith('/dev'): p_type.set('filesystem', disk_data.get('type') or 'ext3') break p_type.set('installiso', 'true') p_type.set('boot', "vmxboot/suse-leap42.1") p_type.set('format', self.format) p_type.set('bootloader', 'grub2') p_type.set('timezone', __salt__['timezone.get_zone']()) p_type.set('hwclock', __salt__['timezone.get_hwclock']()) return pref def _get_user_groups(self, user): ''' Get user groups. :param user: :return: ''' return [g.gr_name for g in grp.getgrall() if user in g.gr_mem] + [grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name] def _set_users(self, node): ''' Create existing local users. <users group="root"> <user password="$1$wYJUgpM5$RXMMeASDc035eX.NbYWFl0" home="/root" name="root"/> </users> :param node: :return: ''' # Get real local users with the local passwords shadow = {} with salt.utils.files.fopen('/etc/shadow') as rfh: for sh_line in rfh.read().split(os.linesep): if sh_line.strip(): login, pwd = sh_line.split(":")[:2] if pwd and pwd[0] not in '!*': shadow[login] = {'p': pwd} with salt.utils.files.fopen('/etc/passwd') as rfh: for ps_line in rfh.read().split(os.linesep): if ps_line.strip(): ps_line = ps_line.strip().split(':') if ps_line[0] in shadow: shadow[ps_line[0]]['h'] = ps_line[5] shadow[ps_line[0]]['s'] = ps_line[6] shadow[ps_line[0]]['g'] = self._get_user_groups(ps_line[0]) users_groups = [] users_node = etree.SubElement(node, 'users') for u_name, u_data in shadow.items(): user_node = etree.SubElement(users_node, 'user') user_node.set('password', u_data['p']) user_node.set('home', u_data['h']) user_node.set('name', u_name) users_groups.extend(u_data['g']) 
users_node.set('group', ','.join(users_groups)) return users_node def _set_repositories(self, node): ''' Create repositories. :param node: :return: ''' priority = 99 for repo_id, repo_data in self._data.software.get('repositories', {}).items(): if type(repo_data) == list: repo_data = repo_data[0] if repo_data.get('enabled') or not repo_data.get('disabled'): # RPM and Debian, respectively uri = repo_data.get('baseurl', repo_data.get('uri')) if not uri: continue repo = etree.SubElement(node, 'repository') if self.__grains__.get('os_family') in ('Kali', 'Debian'): repo.set('alias', repo_id) repo.set('distribution', repo_data['dist']) else: repo.set('alias', repo_data['alias']) if self.__grains__.get('os_family', '') == 'Suse': repo.set('type', 'yast2') # TODO: Check for options! repo.set('priority', str(priority)) source = etree.SubElement(repo, 'source') source.set('path', uri) # RPM and Debian, respectively priority -= 1 def _set_packages(self, node): ''' Set packages and collections. :param node: :return: ''' pkgs = etree.SubElement(node, 'packages') for pkg_name, pkg_version in sorted(self._data.software.get('packages', {}).items()): pkg = etree.SubElement(pkgs, 'package') pkg.set('name', pkg_name) # Add collections (SUSE) if self.__grains__.get('os_family', '') == 'Suse': for ptn_id, ptn_data in self._data.software.get('patterns', {}).items(): if ptn_data.get('installed'): ptn = etree.SubElement(pkgs, 'namedCollection') ptn.set('name', ptn_id) return pkgs def _create_doc(self): ''' Create document. :return: ''' root = etree.Element('image') root.set('schemaversion', '6.3') root.set('name', self.name) return root
saltstack/salt
salt/modules/inspectlib/kiwiproc.py
KiwiExporter._create_doc
python
def _create_doc(self): ''' Create document. :return: ''' root = etree.Element('image') root.set('schemaversion', '6.3') root.set('name', self.name) return root
Create document. :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/kiwiproc.py#L247-L257
null
class KiwiExporter(object): ''' Exports system description as Kiwi configuration. ''' def __init__(self, grains, format): self.__grains__ = grains self.format = format self._data = type('data', (), {}) self.name = None def load(self, **descr): ''' Load data by keys. :param data: :return: ''' for obj, data in descr.items(): setattr(self._data, obj, data) return self def export(self, name): ''' Export to the Kiwi config.xml as text. :return: ''' self.name = name root = self._create_doc() self._set_description(root) self._set_preferences(root) self._set_repositories(root) self._set_users(root) self._set_packages(root) return '\n'.join([line for line in minidom.parseString( etree.tostring(root, encoding='UTF-8', pretty_print=True)).toprettyxml(indent=" ").split("\n") if line.strip()]) def _get_package_manager(self): ''' Get package manager. :return: ''' ret = None if self.__grains__.get('os_family') in ('Kali', 'Debian'): ret = 'apt-get' elif self.__grains__.get('os_family', '') == 'Suse': ret = 'zypper' elif self.__grains__.get('os_family', '') == 'redhat': ret = 'yum' if ret is None: raise InspectorKiwiProcessorException('Unsupported platform: {0}'.format(self.__grains__.get('os_family'))) return ret def _set_preferences(self, node): ''' Set preferences. 
:return: ''' pref = etree.SubElement(node, 'preferences') pacman = etree.SubElement(pref, 'packagemanager') pacman.text = self._get_package_manager() p_version = etree.SubElement(pref, 'version') p_version.text = '0.0.1' p_type = etree.SubElement(pref, 'type') p_type.set('image', 'vmx') for disk_id, disk_data in self._data.system.get('disks', {}).items(): if disk_id.startswith('/dev'): p_type.set('filesystem', disk_data.get('type') or 'ext3') break p_type.set('installiso', 'true') p_type.set('boot', "vmxboot/suse-leap42.1") p_type.set('format', self.format) p_type.set('bootloader', 'grub2') p_type.set('timezone', __salt__['timezone.get_zone']()) p_type.set('hwclock', __salt__['timezone.get_hwclock']()) return pref def _get_user_groups(self, user): ''' Get user groups. :param user: :return: ''' return [g.gr_name for g in grp.getgrall() if user in g.gr_mem] + [grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name] def _set_users(self, node): ''' Create existing local users. <users group="root"> <user password="$1$wYJUgpM5$RXMMeASDc035eX.NbYWFl0" home="/root" name="root"/> </users> :param node: :return: ''' # Get real local users with the local passwords shadow = {} with salt.utils.files.fopen('/etc/shadow') as rfh: for sh_line in rfh.read().split(os.linesep): if sh_line.strip(): login, pwd = sh_line.split(":")[:2] if pwd and pwd[0] not in '!*': shadow[login] = {'p': pwd} with salt.utils.files.fopen('/etc/passwd') as rfh: for ps_line in rfh.read().split(os.linesep): if ps_line.strip(): ps_line = ps_line.strip().split(':') if ps_line[0] in shadow: shadow[ps_line[0]]['h'] = ps_line[5] shadow[ps_line[0]]['s'] = ps_line[6] shadow[ps_line[0]]['g'] = self._get_user_groups(ps_line[0]) users_groups = [] users_node = etree.SubElement(node, 'users') for u_name, u_data in shadow.items(): user_node = etree.SubElement(users_node, 'user') user_node.set('password', u_data['p']) user_node.set('home', u_data['h']) user_node.set('name', u_name) users_groups.extend(u_data['g']) 
users_node.set('group', ','.join(users_groups)) return users_node def _set_repositories(self, node): ''' Create repositories. :param node: :return: ''' priority = 99 for repo_id, repo_data in self._data.software.get('repositories', {}).items(): if type(repo_data) == list: repo_data = repo_data[0] if repo_data.get('enabled') or not repo_data.get('disabled'): # RPM and Debian, respectively uri = repo_data.get('baseurl', repo_data.get('uri')) if not uri: continue repo = etree.SubElement(node, 'repository') if self.__grains__.get('os_family') in ('Kali', 'Debian'): repo.set('alias', repo_id) repo.set('distribution', repo_data['dist']) else: repo.set('alias', repo_data['alias']) if self.__grains__.get('os_family', '') == 'Suse': repo.set('type', 'yast2') # TODO: Check for options! repo.set('priority', str(priority)) source = etree.SubElement(repo, 'source') source.set('path', uri) # RPM and Debian, respectively priority -= 1 def _set_packages(self, node): ''' Set packages and collections. :param node: :return: ''' pkgs = etree.SubElement(node, 'packages') for pkg_name, pkg_version in sorted(self._data.software.get('packages', {}).items()): pkg = etree.SubElement(pkgs, 'package') pkg.set('name', pkg_name) # Add collections (SUSE) if self.__grains__.get('os_family', '') == 'Suse': for ptn_id, ptn_data in self._data.software.get('patterns', {}).items(): if ptn_data.get('installed'): ptn = etree.SubElement(pkgs, 'namedCollection') ptn.set('name', ptn_id) return pkgs def _set_description(self, node): ''' Create a system description. :return: ''' hostname = socket.getfqdn() or platform.node() descr = etree.SubElement(node, 'description') author = etree.SubElement(descr, 'author') author.text = "salt.modules.node on {0}".format(hostname) contact = etree.SubElement(descr, 'contact') contact.text = 'root@{0}'.format(hostname) specs = etree.SubElement(descr, 'specification') specs.text = 'Rebuild of {0}, based on Salt inspection.'.format(hostname) return descr
saltstack/salt
salt/sdb/vault.py
set_
python
def set_(key, value, profile=None): ''' Set a key/value pair in the vault service ''' if '?' in key: __utils__['versions.warn_until']( 'Neon', ( 'Using ? to seperate between the path and key for vault has been deprecated ' 'and will be removed in {version}. Please just use a /.' ), ) path, key = key.split('?') else: path, key = key.rsplit('/', 1) try: url = 'v1/{0}'.format(path) data = {key: value} response = __utils__['vault.make_request']( 'POST', url, profile, json=data) if response.status_code != 204: response.raise_for_status() return True except Exception as e: log.error('Failed to write secret! %s: %s', type(e).__name__, e) raise salt.exceptions.CommandExecutionError(e)
Set a key/value pair in the vault service
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/sdb/vault.py#L55-L85
null
# -*- coding: utf-8 -*- ''' Vault SDB Module :maintainer: SaltStack :maturity: New :platform: all .. versionadded:: 2016.11.0 This module allows access to Hashicorp Vault using an ``sdb://`` URI. Base configuration instructions are documented in the execution module docs. Below are noted extra configuration required for the sdb module, but the base configuration must also be completed. Like all sdb modules, the vault module requires a configuration profile to be configured in either the minion configuration file or a pillar. This profile requires only setting the ``driver`` parameter to ``vault``: .. code-block:: yaml myvault: driver: vault Once configured you can access data using a URL such as: .. code-block:: yaml password: sdb://myvault/secret/passwords/mypassword In this URL, ``myvault`` refers to the configuration profile, ``secret/passwords`` is the path where the data resides, and ``mypassword`` is the key of the data to return. The above URI is analogous to running the following vault command: .. code-block:: bash $ vault read -field=mypassword secret/passwords ''' # import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import salt.exceptions log = logging.getLogger(__name__) __func_alias__ = { 'set_': 'set' } def get(key, profile=None): ''' Get a value from the vault service ''' if '?' in key: __utils__['versions.warn_until']( 'Neon', ( 'Using ? to seperate between the path and key for vault has been deprecated ' 'and will be removed in {version}. Please just use a /.' ), ) path, key = key.split('?') else: path, key = key.rsplit('/', 1) try: url = 'v1/{0}'.format(path) response = __utils__['vault.make_request']('GET', url, profile) if response.status_code != 200: response.raise_for_status() data = response.json()['data'] return data[key] except Exception as e: log.error('Failed to read secret! %s: %s', type(e).__name__, e) raise salt.exceptions.CommandExecutionError(e)
saltstack/salt
salt/thorium/timer.py
hold
python
def hold(name, seconds): ''' Wait for a given period of time, then fire a result of True, requiring this state allows for an action to be blocked for evaluation based on time USAGE: .. code-block:: yaml hold_on_a_moment: timer.hold: - seconds: 30 ''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} start = time.time() if 'timer' not in __context__: __context__['timer'] = {} if name not in __context__['timer']: __context__['timer'][name] = start if (start - __context__['timer'][name]) > seconds: ret['result'] = True __context__['timer'][name] = start return ret
Wait for a given period of time, then fire a result of True, requiring this state allows for an action to be blocked for evaluation based on time USAGE: .. code-block:: yaml hold_on_a_moment: timer.hold: - seconds: 30
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/thorium/timer.py#L11-L37
null
# -*- coding: utf-8 -*- ''' Allow for flow based timers. These timers allow for a sleep to exist across multiple runs of the flow ''' from __future__ import absolute_import, print_function, unicode_literals import time
saltstack/salt
salt/modules/mongodb.py
_connect
python
def _connect(user=None, password=None, host=None, port=None, database='admin',
             authdb=None):
    '''
    Open a connection to MongoDB and return the client object.

    Any of ``user``, ``password``, ``host`` or ``port`` left unset is filled
    in from the ``mongodb.*`` configuration options (config or pillar).
    ``authdb`` defaults to ``database``.

    Returns a ``pymongo.MongoClient`` on success, or ``False`` when the
    connection or authentication fails. (The previous docstring incorrectly
    claimed a ``(user, host, port)`` tuple was returned.)
    '''
    option = __salt__['config.option']
    user = user or option('mongodb.user')
    password = password or option('mongodb.password')
    host = host or option('mongodb.host')
    port = port or option('mongodb.port')
    authdb = authdb or database

    try:
        conn = pymongo.MongoClient(host=host, port=port)
        mdb = pymongo.database.Database(conn, database)
        if user and password:
            # Authenticate against the requested (or default) auth database.
            mdb.authenticate(user, password, source=authdb)
    except pymongo.errors.PyMongoError:
        log.error('Error connecting to database %s', database)
        return False

    return conn
Returns a tuple of (user, host, port) with config, pillar, or default values assigned to missing values.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mongodb.py#L49-L74
null
# -*- coding: utf-8 -*- ''' Module to provide MongoDB functionality to Salt :configuration: This module uses PyMongo, and accepts configuration details as parameters as well as configuration settings:: mongodb.host: 'localhost' mongodb.port: 27017 mongodb.user: '' mongodb.password: '' This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import re # Import salt libs import salt.utils.json from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import get_error_message as _get_error_message # Import third party libs from salt.ext import six try: import pymongo HAS_MONGODB = True except ImportError: HAS_MONGODB = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if pymongo is installed ''' if HAS_MONGODB: return 'mongodb' else: return (False, 'The mongodb execution module cannot be loaded: the pymongo library is not available.') def _to_dict(objects): ''' Potentially interprets a string as JSON for usage with mongo ''' try: if isinstance(objects, six.string_types): objects = salt.utils.json.loads(objects) except ValueError as err: log.error("Could not parse objects: %s", err) raise err return objects def db_list(user=None, password=None, host=None, port=None, authdb=None): ''' List all MongoDB databases CLI Example: .. code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing databases') return conn.database_names() except pymongo.errors.PyMongoError as err: log.error(err) return six.text_type(err) def db_exists(name, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a database exists in MongoDB CLI Example: .. 
code-block:: bash salt '*' mongodb.db_exists <name> <user> <password> <host> <port> ''' dbs = db_list(user, password, host, port, authdb=authdb) if isinstance(dbs, six.string_types): return False return name in dbs def db_remove(name, user=None, password=None, host=None, port=None, authdb=None): ''' Remove a MongoDB database CLI Example: .. code-block:: bash salt '*' mongodb.db_remove <name> <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing database %s', name) conn.drop_database(name) except pymongo.errors.PyMongoError as err: log.error('Removing database %s failed with error: %s', name, err) return six.text_type(err) return True def _version(mdb): return mdb.command('buildInfo')['version'] def version(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get MongoDB instance version CLI Example: .. code-block:: bash salt '*' mongodb.version <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) try: mdb = pymongo.database.Database(conn, database) return _version(mdb) except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_find(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get single user from MongoDB CLI Example: .. 
code-block:: bash salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) mdb = pymongo.database.Database(conn, database) try: return mdb.command("usersInfo", name)["users"] except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return (False, six.text_type(err)) def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' List users of a MongoDB database CLI Example: .. code-block:: bash salt '*' mongodb.user_list <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing users') mdb = pymongo.database.Database(conn, database) output = [] mongodb_version = _version(mdb) if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'): for user in mdb.command('usersInfo')['users']: output.append( {'user': user['user'], 'roles': user['roles']} ) else: for user in mdb.system.users.find(): output.append( {'user': user['user'], 'readOnly': user.get('readOnly', 'None')} ) return output except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_exists(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Checks if a user exists in MongoDB CLI Example: .. 
code-block:: bash salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database> ''' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): return True return False def user_create(name, passwd, user=None, password=None, host=None, port=None, database='admin', authdb=None, roles=None): ''' Create a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' if not roles: roles = [] try: log.info('Creating user %s', name) mdb = pymongo.database.Database(conn, database) mdb.add_user(name, passwd, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_remove(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Remove a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing user %s', name) mdb = pymongo.database.Database(conn, database) mdb.remove_user(name) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_roles_exists(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): for role in roles: # if the role was provided in the shortened form, we convert it to a long form if not isinstance(role, dict): role = {'role': role, 'db': database} if role not in dict(user).get('roles', []): return False return True return False def user_grant_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Grant one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Granting roles %s to user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("grantRolesToUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Granting roles %s to user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def user_revoke_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Revoke one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_revoke_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_revoke_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Revoking roles %s from user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("revokeRolesFromUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Revoking roles %s from user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def insert(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Insert an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" try: objects = _to_dict(objects) except Exception as err: return err try: log.info("Inserting %r into %s.%s", objects, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.insert(objects) return ids except pymongo.errors.PyMongoError as err: log.error("Inserting objects %r failed with error %s", objects, err) return err def update_one(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Update an object into a collection http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one .. versionadded:: 2016.11.0 CLI Example: .. 
code-block:: bash salt '*' mongodb.update_one '{"_id": "my_minion"} {"bar": "BAR"}' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" objects = six.text_type(objects) objs = re.split(r'}\s+{', objects) if len(objs) is not 2: return "Your request does not contain a valid " + \ "'{_\"id\": \"my_id\"} {\"my_doc\": \"my_val\"}'" objs[0] = objs[0] + '}' objs[1] = '{' + objs[1] document = [] for obj in objs: try: obj = _to_dict(obj) document.append(obj) except Exception as err: return err _id_field = document[0] _update_doc = document[1] # need a string to perform the test, so using objs[0] test_f = find(collection, objs[0], user, password, host, port, database, authdb) if not isinstance(test_f, list): return 'The find result is not well formatted. An error appears; cannot update.' elif not test_f: return 'Did not find any result. You should try an insert before.' elif len(test_f) > 1: return 'Too many results. Please try to be more specific.' else: try: log.info("Updating %r into %s.%s", _id_field, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.update_one(_id_field, {'$set': _update_doc}) nb_mod = ids.modified_count return "{0} objects updated".format(nb_mod) except pymongo.errors.PyMongoError as err: log.error('Updating object %s failed with error %s', objects, err) return err def find(collection, query=None, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Find an object or list of objects in a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err def remove(collection, query=None, user=None, password=None, host=None, port=None, database='admin', w=1, authdb=None): ''' Remove an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.remove mycollection '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return _get_error_message(err) try: log.info("Removing %r from %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.remove(query, w=w) return "{0} objects removed".format(ret['n']) except pymongo.errors.PyMongoError as err: log.error("Removing objects failed with error: %s", _get_error_message(err)) return _get_error_message(err)
saltstack/salt
salt/modules/mongodb.py
_to_dict
python
def _to_dict(objects):
    '''
    Decode *objects* from JSON when it is a string, for usage with mongo.

    Non-string inputs are returned untouched; invalid JSON re-raises the
    ``ValueError`` after logging it.
    '''
    if not isinstance(objects, six.string_types):
        return objects
    try:
        return salt.utils.json.loads(objects)
    except ValueError as err:
        log.error("Could not parse objects: %s", err)
        raise
Potentially interprets a string as JSON for usage with mongo
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mongodb.py#L77-L88
[ "def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n" ]
# -*- coding: utf-8 -*- ''' Module to provide MongoDB functionality to Salt :configuration: This module uses PyMongo, and accepts configuration details as parameters as well as configuration settings:: mongodb.host: 'localhost' mongodb.port: 27017 mongodb.user: '' mongodb.password: '' This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import re # Import salt libs import salt.utils.json from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import get_error_message as _get_error_message # Import third party libs from salt.ext import six try: import pymongo HAS_MONGODB = True except ImportError: HAS_MONGODB = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if pymongo is installed ''' if HAS_MONGODB: return 'mongodb' else: return (False, 'The mongodb execution module cannot be loaded: the pymongo library is not available.') def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Returns a tuple of (user, host, port) with config, pillar, or default values assigned to missing values. ''' if not user: user = __salt__['config.option']('mongodb.user') if not password: password = __salt__['config.option']('mongodb.password') if not host: host = __salt__['config.option']('mongodb.host') if not port: port = __salt__['config.option']('mongodb.port') if not authdb: authdb = database try: conn = pymongo.MongoClient(host=host, port=port) mdb = pymongo.database.Database(conn, database) if user and password: mdb.authenticate(user, password, source=authdb) except pymongo.errors.PyMongoError: log.error('Error connecting to database %s', database) return False return conn def db_list(user=None, password=None, host=None, port=None, authdb=None): ''' List all MongoDB databases CLI Example: .. 
code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing databases') return conn.database_names() except pymongo.errors.PyMongoError as err: log.error(err) return six.text_type(err) def db_exists(name, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a database exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.db_exists <name> <user> <password> <host> <port> ''' dbs = db_list(user, password, host, port, authdb=authdb) if isinstance(dbs, six.string_types): return False return name in dbs def db_remove(name, user=None, password=None, host=None, port=None, authdb=None): ''' Remove a MongoDB database CLI Example: .. code-block:: bash salt '*' mongodb.db_remove <name> <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing database %s', name) conn.drop_database(name) except pymongo.errors.PyMongoError as err: log.error('Removing database %s failed with error: %s', name, err) return six.text_type(err) return True def _version(mdb): return mdb.command('buildInfo')['version'] def version(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get MongoDB instance version CLI Example: .. 
code-block:: bash salt '*' mongodb.version <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) try: mdb = pymongo.database.Database(conn, database) return _version(mdb) except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_find(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get single user from MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) mdb = pymongo.database.Database(conn, database) try: return mdb.command("usersInfo", name)["users"] except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return (False, six.text_type(err)) def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' List users of a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.user_list <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing users') mdb = pymongo.database.Database(conn, database) output = [] mongodb_version = _version(mdb) if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'): for user in mdb.command('usersInfo')['users']: output.append( {'user': user['user'], 'roles': user['roles']} ) else: for user in mdb.system.users.find(): output.append( {'user': user['user'], 'readOnly': user.get('readOnly', 'None')} ) return output except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_exists(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Checks if a user exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database> ''' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): return True return False def user_create(name, passwd, user=None, password=None, host=None, port=None, database='admin', authdb=None, roles=None): ''' Create a MongoDB user CLI Example: .. 
code-block:: bash salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' if not roles: roles = [] try: log.info('Creating user %s', name) mdb = pymongo.database.Database(conn, database) mdb.add_user(name, passwd, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_remove(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Remove a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing user %s', name) mdb = pymongo.database.Database(conn, database) mdb.remove_user(name) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_roles_exists(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): for role in roles: # if the role was provided in the shortened form, we convert it to a long form if not isinstance(role, dict): role = {'role': role, 'db': database} if role not in dict(user).get('roles', []): return False return True return False def user_grant_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Grant one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Granting roles %s to user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("grantRolesToUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Granting roles %s to user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def user_revoke_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Revoke one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_revoke_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_revoke_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Revoking roles %s from user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("revokeRolesFromUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Revoking roles %s from user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def insert(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Insert an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" try: objects = _to_dict(objects) except Exception as err: return err try: log.info("Inserting %r into %s.%s", objects, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.insert(objects) return ids except pymongo.errors.PyMongoError as err: log.error("Inserting objects %r failed with error %s", objects, err) return err def update_one(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Update an object into a collection http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one .. versionadded:: 2016.11.0 CLI Example: .. 
code-block:: bash salt '*' mongodb.update_one '{"_id": "my_minion"} {"bar": "BAR"}' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" objects = six.text_type(objects) objs = re.split(r'}\s+{', objects) if len(objs) is not 2: return "Your request does not contain a valid " + \ "'{_\"id\": \"my_id\"} {\"my_doc\": \"my_val\"}'" objs[0] = objs[0] + '}' objs[1] = '{' + objs[1] document = [] for obj in objs: try: obj = _to_dict(obj) document.append(obj) except Exception as err: return err _id_field = document[0] _update_doc = document[1] # need a string to perform the test, so using objs[0] test_f = find(collection, objs[0], user, password, host, port, database, authdb) if not isinstance(test_f, list): return 'The find result is not well formatted. An error appears; cannot update.' elif not test_f: return 'Did not find any result. You should try an insert before.' elif len(test_f) > 1: return 'Too many results. Please try to be more specific.' else: try: log.info("Updating %r into %s.%s", _id_field, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.update_one(_id_field, {'$set': _update_doc}) nb_mod = ids.modified_count return "{0} objects updated".format(nb_mod) except pymongo.errors.PyMongoError as err: log.error('Updating object %s failed with error %s', objects, err) return err def find(collection, query=None, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Find an object or list of objects in a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err def remove(collection, query=None, user=None, password=None, host=None, port=None, database='admin', w=1, authdb=None): ''' Remove an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.remove mycollection '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return _get_error_message(err) try: log.info("Removing %r from %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.remove(query, w=w) return "{0} objects removed".format(ret['n']) except pymongo.errors.PyMongoError as err: log.error("Removing objects failed with error: %s", _get_error_message(err)) return _get_error_message(err)
saltstack/salt
salt/modules/mongodb.py
db_list
python
def db_list(user=None, password=None, host=None, port=None, authdb=None):
    '''
    List all MongoDB databases

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.db_list <user> <password> <host> <port>
    '''
    # Open a client connection; _connect returns False on failure.
    connection = _connect(user, password, host, port, authdb=authdb)
    if not connection:
        return 'Failed to connect to mongo database'

    log.info('Listing databases')
    try:
        # NOTE: database_names() is what the rest of this module uses;
        # kept as-is for behavioral parity.
        names = connection.database_names()
    except pymongo.errors.PyMongoError as err:
        log.error(err)
        return six.text_type(err)
    return names
List all MongoDB databases CLI Example: .. code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mongodb.py#L91-L110
[ "def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None):\n '''\n Returns a tuple of (user, host, port) with config, pillar, or default\n values assigned to missing values.\n '''\n if not user:\n user = __salt__['config.option']('mongodb.user')\n if not password:\n password = __salt__['config.option']('mongodb.password')\n if not host:\n host = __salt__['config.option']('mongodb.host')\n if not port:\n port = __salt__['config.option']('mongodb.port')\n if not authdb:\n authdb = database\n\n try:\n conn = pymongo.MongoClient(host=host, port=port)\n mdb = pymongo.database.Database(conn, database)\n if user and password:\n mdb.authenticate(user, password, source=authdb)\n except pymongo.errors.PyMongoError:\n log.error('Error connecting to database %s', database)\n return False\n\n return conn\n" ]
# -*- coding: utf-8 -*- ''' Module to provide MongoDB functionality to Salt :configuration: This module uses PyMongo, and accepts configuration details as parameters as well as configuration settings:: mongodb.host: 'localhost' mongodb.port: 27017 mongodb.user: '' mongodb.password: '' This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import re # Import salt libs import salt.utils.json from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import get_error_message as _get_error_message # Import third party libs from salt.ext import six try: import pymongo HAS_MONGODB = True except ImportError: HAS_MONGODB = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if pymongo is installed ''' if HAS_MONGODB: return 'mongodb' else: return (False, 'The mongodb execution module cannot be loaded: the pymongo library is not available.') def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Returns a tuple of (user, host, port) with config, pillar, or default values assigned to missing values. 
''' if not user: user = __salt__['config.option']('mongodb.user') if not password: password = __salt__['config.option']('mongodb.password') if not host: host = __salt__['config.option']('mongodb.host') if not port: port = __salt__['config.option']('mongodb.port') if not authdb: authdb = database try: conn = pymongo.MongoClient(host=host, port=port) mdb = pymongo.database.Database(conn, database) if user and password: mdb.authenticate(user, password, source=authdb) except pymongo.errors.PyMongoError: log.error('Error connecting to database %s', database) return False return conn def _to_dict(objects): ''' Potentially interprets a string as JSON for usage with mongo ''' try: if isinstance(objects, six.string_types): objects = salt.utils.json.loads(objects) except ValueError as err: log.error("Could not parse objects: %s", err) raise err return objects def db_exists(name, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a database exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.db_exists <name> <user> <password> <host> <port> ''' dbs = db_list(user, password, host, port, authdb=authdb) if isinstance(dbs, six.string_types): return False return name in dbs def db_remove(name, user=None, password=None, host=None, port=None, authdb=None): ''' Remove a MongoDB database CLI Example: .. code-block:: bash salt '*' mongodb.db_remove <name> <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing database %s', name) conn.drop_database(name) except pymongo.errors.PyMongoError as err: log.error('Removing database %s failed with error: %s', name, err) return six.text_type(err) return True def _version(mdb): return mdb.command('buildInfo')['version'] def version(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get MongoDB instance version CLI Example: .. 
code-block:: bash salt '*' mongodb.version <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) try: mdb = pymongo.database.Database(conn, database) return _version(mdb) except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_find(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get single user from MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) mdb = pymongo.database.Database(conn, database) try: return mdb.command("usersInfo", name)["users"] except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return (False, six.text_type(err)) def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' List users of a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.user_list <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing users') mdb = pymongo.database.Database(conn, database) output = [] mongodb_version = _version(mdb) if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'): for user in mdb.command('usersInfo')['users']: output.append( {'user': user['user'], 'roles': user['roles']} ) else: for user in mdb.system.users.find(): output.append( {'user': user['user'], 'readOnly': user.get('readOnly', 'None')} ) return output except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_exists(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Checks if a user exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database> ''' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): return True return False def user_create(name, passwd, user=None, password=None, host=None, port=None, database='admin', authdb=None, roles=None): ''' Create a MongoDB user CLI Example: .. 
code-block:: bash salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' if not roles: roles = [] try: log.info('Creating user %s', name) mdb = pymongo.database.Database(conn, database) mdb.add_user(name, passwd, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_remove(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Remove a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing user %s', name) mdb = pymongo.database.Database(conn, database) mdb.remove_user(name) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_roles_exists(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): for role in roles: # if the role was provided in the shortened form, we convert it to a long form if not isinstance(role, dict): role = {'role': role, 'db': database} if role not in dict(user).get('roles', []): return False return True return False def user_grant_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Grant one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Granting roles %s to user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("grantRolesToUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Granting roles %s to user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def user_revoke_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Revoke one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_revoke_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_revoke_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Revoking roles %s from user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("revokeRolesFromUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Revoking roles %s from user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def insert(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Insert an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" try: objects = _to_dict(objects) except Exception as err: return err try: log.info("Inserting %r into %s.%s", objects, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.insert(objects) return ids except pymongo.errors.PyMongoError as err: log.error("Inserting objects %r failed with error %s", objects, err) return err def update_one(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Update an object into a collection http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one .. versionadded:: 2016.11.0 CLI Example: .. 
code-block:: bash salt '*' mongodb.update_one '{"_id": "my_minion"} {"bar": "BAR"}' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" objects = six.text_type(objects) objs = re.split(r'}\s+{', objects) if len(objs) is not 2: return "Your request does not contain a valid " + \ "'{_\"id\": \"my_id\"} {\"my_doc\": \"my_val\"}'" objs[0] = objs[0] + '}' objs[1] = '{' + objs[1] document = [] for obj in objs: try: obj = _to_dict(obj) document.append(obj) except Exception as err: return err _id_field = document[0] _update_doc = document[1] # need a string to perform the test, so using objs[0] test_f = find(collection, objs[0], user, password, host, port, database, authdb) if not isinstance(test_f, list): return 'The find result is not well formatted. An error appears; cannot update.' elif not test_f: return 'Did not find any result. You should try an insert before.' elif len(test_f) > 1: return 'Too many results. Please try to be more specific.' else: try: log.info("Updating %r into %s.%s", _id_field, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.update_one(_id_field, {'$set': _update_doc}) nb_mod = ids.modified_count return "{0} objects updated".format(nb_mod) except pymongo.errors.PyMongoError as err: log.error('Updating object %s failed with error %s', objects, err) return err def find(collection, query=None, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Find an object or list of objects in a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err def remove(collection, query=None, user=None, password=None, host=None, port=None, database='admin', w=1, authdb=None): ''' Remove an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.remove mycollection '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return _get_error_message(err) try: log.info("Removing %r from %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.remove(query, w=w) return "{0} objects removed".format(ret['n']) except pymongo.errors.PyMongoError as err: log.error("Removing objects failed with error: %s", _get_error_message(err)) return _get_error_message(err)
saltstack/salt
salt/modules/mongodb.py
db_exists
python
def db_exists(name, user=None, password=None, host=None, port=None, authdb=None):
    '''
    Checks if a database exists in MongoDB

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.db_exists <name> <user> <password> <host> <port>
    '''
    available = db_list(user, password, host, port, authdb=authdb)
    # db_list returns an error *string* when the connection fails;
    # treat that case as "database not found".
    if isinstance(available, six.string_types):
        return False
    return name in available
Checks if a database exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.db_exists <name> <user> <password> <host> <port>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mongodb.py#L113-L128
[ "def db_list(user=None, password=None, host=None, port=None, authdb=None):\n '''\n List all MongoDB databases\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' mongodb.db_list <user> <password> <host> <port>\n '''\n conn = _connect(user, password, host, port, authdb=authdb)\n if not conn:\n return 'Failed to connect to mongo database'\n\n try:\n log.info('Listing databases')\n return conn.database_names()\n except pymongo.errors.PyMongoError as err:\n log.error(err)\n return six.text_type(err)\n" ]
# -*- coding: utf-8 -*- ''' Module to provide MongoDB functionality to Salt :configuration: This module uses PyMongo, and accepts configuration details as parameters as well as configuration settings:: mongodb.host: 'localhost' mongodb.port: 27017 mongodb.user: '' mongodb.password: '' This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import re # Import salt libs import salt.utils.json from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import get_error_message as _get_error_message # Import third party libs from salt.ext import six try: import pymongo HAS_MONGODB = True except ImportError: HAS_MONGODB = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if pymongo is installed ''' if HAS_MONGODB: return 'mongodb' else: return (False, 'The mongodb execution module cannot be loaded: the pymongo library is not available.') def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Returns a tuple of (user, host, port) with config, pillar, or default values assigned to missing values. 
''' if not user: user = __salt__['config.option']('mongodb.user') if not password: password = __salt__['config.option']('mongodb.password') if not host: host = __salt__['config.option']('mongodb.host') if not port: port = __salt__['config.option']('mongodb.port') if not authdb: authdb = database try: conn = pymongo.MongoClient(host=host, port=port) mdb = pymongo.database.Database(conn, database) if user and password: mdb.authenticate(user, password, source=authdb) except pymongo.errors.PyMongoError: log.error('Error connecting to database %s', database) return False return conn def _to_dict(objects): ''' Potentially interprets a string as JSON for usage with mongo ''' try: if isinstance(objects, six.string_types): objects = salt.utils.json.loads(objects) except ValueError as err: log.error("Could not parse objects: %s", err) raise err return objects def db_list(user=None, password=None, host=None, port=None, authdb=None): ''' List all MongoDB databases CLI Example: .. code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing databases') return conn.database_names() except pymongo.errors.PyMongoError as err: log.error(err) return six.text_type(err) def db_remove(name, user=None, password=None, host=None, port=None, authdb=None): ''' Remove a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.db_remove <name> <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing database %s', name) conn.drop_database(name) except pymongo.errors.PyMongoError as err: log.error('Removing database %s failed with error: %s', name, err) return six.text_type(err) return True def _version(mdb): return mdb.command('buildInfo')['version'] def version(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get MongoDB instance version CLI Example: .. code-block:: bash salt '*' mongodb.version <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) try: mdb = pymongo.database.Database(conn, database) return _version(mdb) except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_find(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get single user from MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) mdb = pymongo.database.Database(conn, database) try: return mdb.command("usersInfo", name)["users"] except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return (False, six.text_type(err)) def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' List users of a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.user_list <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing users') mdb = pymongo.database.Database(conn, database) output = [] mongodb_version = _version(mdb) if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'): for user in mdb.command('usersInfo')['users']: output.append( {'user': user['user'], 'roles': user['roles']} ) else: for user in mdb.system.users.find(): output.append( {'user': user['user'], 'readOnly': user.get('readOnly', 'None')} ) return output except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_exists(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Checks if a user exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database> ''' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): return True return False def user_create(name, passwd, user=None, password=None, host=None, port=None, database='admin', authdb=None, roles=None): ''' Create a MongoDB user CLI Example: .. 
code-block:: bash salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' if not roles: roles = [] try: log.info('Creating user %s', name) mdb = pymongo.database.Database(conn, database) mdb.add_user(name, passwd, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_remove(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Remove a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing user %s', name) mdb = pymongo.database.Database(conn, database) mdb.remove_user(name) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_roles_exists(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): for role in roles: # if the role was provided in the shortened form, we convert it to a long form if not isinstance(role, dict): role = {'role': role, 'db': database} if role not in dict(user).get('roles', []): return False return True return False def user_grant_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Grant one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Granting roles %s to user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("grantRolesToUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Granting roles %s to user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def user_revoke_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Revoke one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_revoke_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_revoke_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Revoking roles %s from user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("revokeRolesFromUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Revoking roles %s from user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def insert(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Insert an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" try: objects = _to_dict(objects) except Exception as err: return err try: log.info("Inserting %r into %s.%s", objects, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.insert(objects) return ids except pymongo.errors.PyMongoError as err: log.error("Inserting objects %r failed with error %s", objects, err) return err def update_one(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Update an object into a collection http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one .. versionadded:: 2016.11.0 CLI Example: .. 
code-block:: bash salt '*' mongodb.update_one '{"_id": "my_minion"} {"bar": "BAR"}' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" objects = six.text_type(objects) objs = re.split(r'}\s+{', objects) if len(objs) is not 2: return "Your request does not contain a valid " + \ "'{_\"id\": \"my_id\"} {\"my_doc\": \"my_val\"}'" objs[0] = objs[0] + '}' objs[1] = '{' + objs[1] document = [] for obj in objs: try: obj = _to_dict(obj) document.append(obj) except Exception as err: return err _id_field = document[0] _update_doc = document[1] # need a string to perform the test, so using objs[0] test_f = find(collection, objs[0], user, password, host, port, database, authdb) if not isinstance(test_f, list): return 'The find result is not well formatted. An error appears; cannot update.' elif not test_f: return 'Did not find any result. You should try an insert before.' elif len(test_f) > 1: return 'Too many results. Please try to be more specific.' else: try: log.info("Updating %r into %s.%s", _id_field, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.update_one(_id_field, {'$set': _update_doc}) nb_mod = ids.modified_count return "{0} objects updated".format(nb_mod) except pymongo.errors.PyMongoError as err: log.error('Updating object %s failed with error %s', objects, err) return err def find(collection, query=None, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Find an object or list of objects in a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err def remove(collection, query=None, user=None, password=None, host=None, port=None, database='admin', w=1, authdb=None): ''' Remove an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.remove mycollection '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return _get_error_message(err) try: log.info("Removing %r from %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.remove(query, w=w) return "{0} objects removed".format(ret['n']) except pymongo.errors.PyMongoError as err: log.error("Removing objects failed with error: %s", _get_error_message(err)) return _get_error_message(err)
saltstack/salt
salt/modules/mongodb.py
db_remove
python
def db_remove(name, user=None, password=None, host=None, port=None, authdb=None): ''' Remove a MongoDB database CLI Example: .. code-block:: bash salt '*' mongodb.db_remove <name> <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing database %s', name) conn.drop_database(name) except pymongo.errors.PyMongoError as err: log.error('Removing database %s failed with error: %s', name, err) return six.text_type(err) return True
Remove a MongoDB database CLI Example: .. code-block:: bash salt '*' mongodb.db_remove <name> <user> <password> <host> <port>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mongodb.py#L131-L152
[ "def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None):\n '''\n Returns a tuple of (user, host, port) with config, pillar, or default\n values assigned to missing values.\n '''\n if not user:\n user = __salt__['config.option']('mongodb.user')\n if not password:\n password = __salt__['config.option']('mongodb.password')\n if not host:\n host = __salt__['config.option']('mongodb.host')\n if not port:\n port = __salt__['config.option']('mongodb.port')\n if not authdb:\n authdb = database\n\n try:\n conn = pymongo.MongoClient(host=host, port=port)\n mdb = pymongo.database.Database(conn, database)\n if user and password:\n mdb.authenticate(user, password, source=authdb)\n except pymongo.errors.PyMongoError:\n log.error('Error connecting to database %s', database)\n return False\n\n return conn\n" ]
# -*- coding: utf-8 -*- ''' Module to provide MongoDB functionality to Salt :configuration: This module uses PyMongo, and accepts configuration details as parameters as well as configuration settings:: mongodb.host: 'localhost' mongodb.port: 27017 mongodb.user: '' mongodb.password: '' This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import re # Import salt libs import salt.utils.json from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import get_error_message as _get_error_message # Import third party libs from salt.ext import six try: import pymongo HAS_MONGODB = True except ImportError: HAS_MONGODB = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if pymongo is installed ''' if HAS_MONGODB: return 'mongodb' else: return (False, 'The mongodb execution module cannot be loaded: the pymongo library is not available.') def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Returns a tuple of (user, host, port) with config, pillar, or default values assigned to missing values. 
''' if not user: user = __salt__['config.option']('mongodb.user') if not password: password = __salt__['config.option']('mongodb.password') if not host: host = __salt__['config.option']('mongodb.host') if not port: port = __salt__['config.option']('mongodb.port') if not authdb: authdb = database try: conn = pymongo.MongoClient(host=host, port=port) mdb = pymongo.database.Database(conn, database) if user and password: mdb.authenticate(user, password, source=authdb) except pymongo.errors.PyMongoError: log.error('Error connecting to database %s', database) return False return conn def _to_dict(objects): ''' Potentially interprets a string as JSON for usage with mongo ''' try: if isinstance(objects, six.string_types): objects = salt.utils.json.loads(objects) except ValueError as err: log.error("Could not parse objects: %s", err) raise err return objects def db_list(user=None, password=None, host=None, port=None, authdb=None): ''' List all MongoDB databases CLI Example: .. code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing databases') return conn.database_names() except pymongo.errors.PyMongoError as err: log.error(err) return six.text_type(err) def db_exists(name, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a database exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.db_exists <name> <user> <password> <host> <port> ''' dbs = db_list(user, password, host, port, authdb=authdb) if isinstance(dbs, six.string_types): return False return name in dbs def _version(mdb): return mdb.command('buildInfo')['version'] def version(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get MongoDB instance version CLI Example: .. 
code-block:: bash salt '*' mongodb.version <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) try: mdb = pymongo.database.Database(conn, database) return _version(mdb) except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_find(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get single user from MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) mdb = pymongo.database.Database(conn, database) try: return mdb.command("usersInfo", name)["users"] except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return (False, six.text_type(err)) def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' List users of a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.user_list <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing users') mdb = pymongo.database.Database(conn, database) output = [] mongodb_version = _version(mdb) if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'): for user in mdb.command('usersInfo')['users']: output.append( {'user': user['user'], 'roles': user['roles']} ) else: for user in mdb.system.users.find(): output.append( {'user': user['user'], 'readOnly': user.get('readOnly', 'None')} ) return output except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_exists(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Checks if a user exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database> ''' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): return True return False def user_create(name, passwd, user=None, password=None, host=None, port=None, database='admin', authdb=None, roles=None): ''' Create a MongoDB user CLI Example: .. 
code-block:: bash salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' if not roles: roles = [] try: log.info('Creating user %s', name) mdb = pymongo.database.Database(conn, database) mdb.add_user(name, passwd, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_remove(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Remove a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing user %s', name) mdb = pymongo.database.Database(conn, database) mdb.remove_user(name) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_roles_exists(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): for role in roles: # if the role was provided in the shortened form, we convert it to a long form if not isinstance(role, dict): role = {'role': role, 'db': database} if role not in dict(user).get('roles', []): return False return True return False def user_grant_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Grant one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Granting roles %s to user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("grantRolesToUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Granting roles %s to user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def user_revoke_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Revoke one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_revoke_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_revoke_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Revoking roles %s from user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("revokeRolesFromUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Revoking roles %s from user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def insert(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Insert an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" try: objects = _to_dict(objects) except Exception as err: return err try: log.info("Inserting %r into %s.%s", objects, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.insert(objects) return ids except pymongo.errors.PyMongoError as err: log.error("Inserting objects %r failed with error %s", objects, err) return err def update_one(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Update an object into a collection http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one .. versionadded:: 2016.11.0 CLI Example: .. 
code-block:: bash salt '*' mongodb.update_one '{"_id": "my_minion"} {"bar": "BAR"}' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" objects = six.text_type(objects) objs = re.split(r'}\s+{', objects) if len(objs) is not 2: return "Your request does not contain a valid " + \ "'{_\"id\": \"my_id\"} {\"my_doc\": \"my_val\"}'" objs[0] = objs[0] + '}' objs[1] = '{' + objs[1] document = [] for obj in objs: try: obj = _to_dict(obj) document.append(obj) except Exception as err: return err _id_field = document[0] _update_doc = document[1] # need a string to perform the test, so using objs[0] test_f = find(collection, objs[0], user, password, host, port, database, authdb) if not isinstance(test_f, list): return 'The find result is not well formatted. An error appears; cannot update.' elif not test_f: return 'Did not find any result. You should try an insert before.' elif len(test_f) > 1: return 'Too many results. Please try to be more specific.' else: try: log.info("Updating %r into %s.%s", _id_field, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.update_one(_id_field, {'$set': _update_doc}) nb_mod = ids.modified_count return "{0} objects updated".format(nb_mod) except pymongo.errors.PyMongoError as err: log.error('Updating object %s failed with error %s', objects, err) return err def find(collection, query=None, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Find an object or list of objects in a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err def remove(collection, query=None, user=None, password=None, host=None, port=None, database='admin', w=1, authdb=None): ''' Remove an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.remove mycollection '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return _get_error_message(err) try: log.info("Removing %r from %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.remove(query, w=w) return "{0} objects removed".format(ret['n']) except pymongo.errors.PyMongoError as err: log.error("Removing objects failed with error: %s", _get_error_message(err)) return _get_error_message(err)
saltstack/salt
salt/modules/mongodb.py
version
python
def version(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get MongoDB instance version CLI Example: .. code-block:: bash salt '*' mongodb.version <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) try: mdb = pymongo.database.Database(conn, database) return _version(mdb) except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err)
Get MongoDB instance version CLI Example: .. code-block:: bash salt '*' mongodb.version <user> <password> <host> <port> <database>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mongodb.py#L159-L180
[ "def _version(mdb):\n return mdb.command('buildInfo')['version']\n", "def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None):\n '''\n Returns a tuple of (user, host, port) with config, pillar, or default\n values assigned to missing values.\n '''\n if not user:\n user = __salt__['config.option']('mongodb.user')\n if not password:\n password = __salt__['config.option']('mongodb.password')\n if not host:\n host = __salt__['config.option']('mongodb.host')\n if not port:\n port = __salt__['config.option']('mongodb.port')\n if not authdb:\n authdb = database\n\n try:\n conn = pymongo.MongoClient(host=host, port=port)\n mdb = pymongo.database.Database(conn, database)\n if user and password:\n mdb.authenticate(user, password, source=authdb)\n except pymongo.errors.PyMongoError:\n log.error('Error connecting to database %s', database)\n return False\n\n return conn\n" ]
# -*- coding: utf-8 -*- ''' Module to provide MongoDB functionality to Salt :configuration: This module uses PyMongo, and accepts configuration details as parameters as well as configuration settings:: mongodb.host: 'localhost' mongodb.port: 27017 mongodb.user: '' mongodb.password: '' This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import re # Import salt libs import salt.utils.json from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import get_error_message as _get_error_message # Import third party libs from salt.ext import six try: import pymongo HAS_MONGODB = True except ImportError: HAS_MONGODB = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if pymongo is installed ''' if HAS_MONGODB: return 'mongodb' else: return (False, 'The mongodb execution module cannot be loaded: the pymongo library is not available.') def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Returns a tuple of (user, host, port) with config, pillar, or default values assigned to missing values. 
''' if not user: user = __salt__['config.option']('mongodb.user') if not password: password = __salt__['config.option']('mongodb.password') if not host: host = __salt__['config.option']('mongodb.host') if not port: port = __salt__['config.option']('mongodb.port') if not authdb: authdb = database try: conn = pymongo.MongoClient(host=host, port=port) mdb = pymongo.database.Database(conn, database) if user and password: mdb.authenticate(user, password, source=authdb) except pymongo.errors.PyMongoError: log.error('Error connecting to database %s', database) return False return conn def _to_dict(objects): ''' Potentially interprets a string as JSON for usage with mongo ''' try: if isinstance(objects, six.string_types): objects = salt.utils.json.loads(objects) except ValueError as err: log.error("Could not parse objects: %s", err) raise err return objects def db_list(user=None, password=None, host=None, port=None, authdb=None): ''' List all MongoDB databases CLI Example: .. code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing databases') return conn.database_names() except pymongo.errors.PyMongoError as err: log.error(err) return six.text_type(err) def db_exists(name, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a database exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.db_exists <name> <user> <password> <host> <port> ''' dbs = db_list(user, password, host, port, authdb=authdb) if isinstance(dbs, six.string_types): return False return name in dbs def db_remove(name, user=None, password=None, host=None, port=None, authdb=None): ''' Remove a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.db_remove <name> <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing database %s', name) conn.drop_database(name) except pymongo.errors.PyMongoError as err: log.error('Removing database %s failed with error: %s', name, err) return six.text_type(err) return True def _version(mdb): return mdb.command('buildInfo')['version'] def user_find(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get single user from MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) mdb = pymongo.database.Database(conn, database) try: return mdb.command("usersInfo", name)["users"] except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return (False, six.text_type(err)) def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' List users of a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.user_list <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing users') mdb = pymongo.database.Database(conn, database) output = [] mongodb_version = _version(mdb) if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'): for user in mdb.command('usersInfo')['users']: output.append( {'user': user['user'], 'roles': user['roles']} ) else: for user in mdb.system.users.find(): output.append( {'user': user['user'], 'readOnly': user.get('readOnly', 'None')} ) return output except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_exists(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Checks if a user exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database> ''' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): return True return False def user_create(name, passwd, user=None, password=None, host=None, port=None, database='admin', authdb=None, roles=None): ''' Create a MongoDB user CLI Example: .. 
code-block:: bash salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' if not roles: roles = [] try: log.info('Creating user %s', name) mdb = pymongo.database.Database(conn, database) mdb.add_user(name, passwd, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_remove(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Remove a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing user %s', name) mdb = pymongo.database.Database(conn, database) mdb.remove_user(name) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_roles_exists(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): for role in roles: # if the role was provided in the shortened form, we convert it to a long form if not isinstance(role, dict): role = {'role': role, 'db': database} if role not in dict(user).get('roles', []): return False return True return False def user_grant_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Grant one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Granting roles %s to user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("grantRolesToUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Granting roles %s to user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def user_revoke_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Revoke one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_revoke_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_revoke_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Revoking roles %s from user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("revokeRolesFromUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Revoking roles %s from user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def insert(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Insert an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" try: objects = _to_dict(objects) except Exception as err: return err try: log.info("Inserting %r into %s.%s", objects, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.insert(objects) return ids except pymongo.errors.PyMongoError as err: log.error("Inserting objects %r failed with error %s", objects, err) return err def update_one(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Update an object into a collection http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one .. versionadded:: 2016.11.0 CLI Example: .. 
code-block:: bash salt '*' mongodb.update_one '{"_id": "my_minion"} {"bar": "BAR"}' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" objects = six.text_type(objects) objs = re.split(r'}\s+{', objects) if len(objs) is not 2: return "Your request does not contain a valid " + \ "'{_\"id\": \"my_id\"} {\"my_doc\": \"my_val\"}'" objs[0] = objs[0] + '}' objs[1] = '{' + objs[1] document = [] for obj in objs: try: obj = _to_dict(obj) document.append(obj) except Exception as err: return err _id_field = document[0] _update_doc = document[1] # need a string to perform the test, so using objs[0] test_f = find(collection, objs[0], user, password, host, port, database, authdb) if not isinstance(test_f, list): return 'The find result is not well formatted. An error appears; cannot update.' elif not test_f: return 'Did not find any result. You should try an insert before.' elif len(test_f) > 1: return 'Too many results. Please try to be more specific.' else: try: log.info("Updating %r into %s.%s", _id_field, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.update_one(_id_field, {'$set': _update_doc}) nb_mod = ids.modified_count return "{0} objects updated".format(nb_mod) except pymongo.errors.PyMongoError as err: log.error('Updating object %s failed with error %s', objects, err) return err def find(collection, query=None, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Find an object or list of objects in a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err def remove(collection, query=None, user=None, password=None, host=None, port=None, database='admin', w=1, authdb=None): ''' Remove an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.remove mycollection '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return _get_error_message(err) try: log.info("Removing %r from %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.remove(query, w=w) return "{0} objects removed".format(ret['n']) except pymongo.errors.PyMongoError as err: log.error("Removing objects failed with error: %s", _get_error_message(err)) return _get_error_message(err)
saltstack/salt
salt/modules/mongodb.py
user_list
python
def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' List users of a MongoDB database CLI Example: .. code-block:: bash salt '*' mongodb.user_list <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing users') mdb = pymongo.database.Database(conn, database) output = [] mongodb_version = _version(mdb) if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'): for user in mdb.command('usersInfo')['users']: output.append( {'user': user['user'], 'roles': user['roles']} ) else: for user in mdb.system.users.find(): output.append( {'user': user['user'], 'readOnly': user.get('readOnly', 'None')} ) return output except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err)
List users of a MongoDB database CLI Example: .. code-block:: bash salt '*' mongodb.user_list <user> <password> <host> <port> <database>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mongodb.py#L208-L245
[ "def _version(mdb):\n return mdb.command('buildInfo')['version']\n", "def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None):\n '''\n Returns a tuple of (user, host, port) with config, pillar, or default\n values assigned to missing values.\n '''\n if not user:\n user = __salt__['config.option']('mongodb.user')\n if not password:\n password = __salt__['config.option']('mongodb.password')\n if not host:\n host = __salt__['config.option']('mongodb.host')\n if not port:\n port = __salt__['config.option']('mongodb.port')\n if not authdb:\n authdb = database\n\n try:\n conn = pymongo.MongoClient(host=host, port=port)\n mdb = pymongo.database.Database(conn, database)\n if user and password:\n mdb.authenticate(user, password, source=authdb)\n except pymongo.errors.PyMongoError:\n log.error('Error connecting to database %s', database)\n return False\n\n return conn\n" ]
# -*- coding: utf-8 -*- ''' Module to provide MongoDB functionality to Salt :configuration: This module uses PyMongo, and accepts configuration details as parameters as well as configuration settings:: mongodb.host: 'localhost' mongodb.port: 27017 mongodb.user: '' mongodb.password: '' This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import re # Import salt libs import salt.utils.json from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import get_error_message as _get_error_message # Import third party libs from salt.ext import six try: import pymongo HAS_MONGODB = True except ImportError: HAS_MONGODB = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if pymongo is installed ''' if HAS_MONGODB: return 'mongodb' else: return (False, 'The mongodb execution module cannot be loaded: the pymongo library is not available.') def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Returns a tuple of (user, host, port) with config, pillar, or default values assigned to missing values. 
''' if not user: user = __salt__['config.option']('mongodb.user') if not password: password = __salt__['config.option']('mongodb.password') if not host: host = __salt__['config.option']('mongodb.host') if not port: port = __salt__['config.option']('mongodb.port') if not authdb: authdb = database try: conn = pymongo.MongoClient(host=host, port=port) mdb = pymongo.database.Database(conn, database) if user and password: mdb.authenticate(user, password, source=authdb) except pymongo.errors.PyMongoError: log.error('Error connecting to database %s', database) return False return conn def _to_dict(objects): ''' Potentially interprets a string as JSON for usage with mongo ''' try: if isinstance(objects, six.string_types): objects = salt.utils.json.loads(objects) except ValueError as err: log.error("Could not parse objects: %s", err) raise err return objects def db_list(user=None, password=None, host=None, port=None, authdb=None): ''' List all MongoDB databases CLI Example: .. code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing databases') return conn.database_names() except pymongo.errors.PyMongoError as err: log.error(err) return six.text_type(err) def db_exists(name, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a database exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.db_exists <name> <user> <password> <host> <port> ''' dbs = db_list(user, password, host, port, authdb=authdb) if isinstance(dbs, six.string_types): return False return name in dbs def db_remove(name, user=None, password=None, host=None, port=None, authdb=None): ''' Remove a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.db_remove <name> <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing database %s', name) conn.drop_database(name) except pymongo.errors.PyMongoError as err: log.error('Removing database %s failed with error: %s', name, err) return six.text_type(err) return True def _version(mdb): return mdb.command('buildInfo')['version'] def version(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get MongoDB instance version CLI Example: .. code-block:: bash salt '*' mongodb.version <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) try: mdb = pymongo.database.Database(conn, database) return _version(mdb) except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_find(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get single user from MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) mdb = pymongo.database.Database(conn, database) try: return mdb.command("usersInfo", name)["users"] except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return (False, six.text_type(err)) def user_exists(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Checks if a user exists in MongoDB CLI Example: .. 
code-block:: bash salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database> ''' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): return True return False def user_create(name, passwd, user=None, password=None, host=None, port=None, database='admin', authdb=None, roles=None): ''' Create a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' if not roles: roles = [] try: log.info('Creating user %s', name) mdb = pymongo.database.Database(conn, database) mdb.add_user(name, passwd, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_remove(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Remove a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing user %s', name) mdb = pymongo.database.Database(conn, database) mdb.remove_user(name) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_roles_exists(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): for role in roles: # if the role was provided in the shortened form, we convert it to a long form if not isinstance(role, dict): role = {'role': role, 'db': database} if role not in dict(user).get('roles', []): return False return True return False def user_grant_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Grant one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Granting roles %s to user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("grantRolesToUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Granting roles %s to user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def user_revoke_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Revoke one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_revoke_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_revoke_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Revoking roles %s from user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("revokeRolesFromUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Revoking roles %s from user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def insert(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Insert an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" try: objects = _to_dict(objects) except Exception as err: return err try: log.info("Inserting %r into %s.%s", objects, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.insert(objects) return ids except pymongo.errors.PyMongoError as err: log.error("Inserting objects %r failed with error %s", objects, err) return err def update_one(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Update an object into a collection http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one .. versionadded:: 2016.11.0 CLI Example: .. 
code-block:: bash salt '*' mongodb.update_one '{"_id": "my_minion"} {"bar": "BAR"}' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" objects = six.text_type(objects) objs = re.split(r'}\s+{', objects) if len(objs) is not 2: return "Your request does not contain a valid " + \ "'{_\"id\": \"my_id\"} {\"my_doc\": \"my_val\"}'" objs[0] = objs[0] + '}' objs[1] = '{' + objs[1] document = [] for obj in objs: try: obj = _to_dict(obj) document.append(obj) except Exception as err: return err _id_field = document[0] _update_doc = document[1] # need a string to perform the test, so using objs[0] test_f = find(collection, objs[0], user, password, host, port, database, authdb) if not isinstance(test_f, list): return 'The find result is not well formatted. An error appears; cannot update.' elif not test_f: return 'Did not find any result. You should try an insert before.' elif len(test_f) > 1: return 'Too many results. Please try to be more specific.' else: try: log.info("Updating %r into %s.%s", _id_field, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.update_one(_id_field, {'$set': _update_doc}) nb_mod = ids.modified_count return "{0} objects updated".format(nb_mod) except pymongo.errors.PyMongoError as err: log.error('Updating object %s failed with error %s', objects, err) return err def find(collection, query=None, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Find an object or list of objects in a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err def remove(collection, query=None, user=None, password=None, host=None, port=None, database='admin', w=1, authdb=None): ''' Remove an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.remove mycollection '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return _get_error_message(err) try: log.info("Removing %r from %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.remove(query, w=w) return "{0} objects removed".format(ret['n']) except pymongo.errors.PyMongoError as err: log.error("Removing objects failed with error: %s", _get_error_message(err)) return _get_error_message(err)
saltstack/salt
salt/modules/mongodb.py
user_exists
python
def user_exists(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Checks if a user exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database> ''' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): return True return False
Checks if a user exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mongodb.py#L248-L268
[ "def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None):\n '''\n List users of a MongoDB database\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' mongodb.user_list <user> <password> <host> <port> <database>\n '''\n conn = _connect(user, password, host, port, authdb=authdb)\n if not conn:\n return 'Failed to connect to mongo database'\n\n try:\n log.info('Listing users')\n mdb = pymongo.database.Database(conn, database)\n\n output = []\n mongodb_version = _version(mdb)\n\n if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'):\n for user in mdb.command('usersInfo')['users']:\n output.append(\n {'user': user['user'],\n 'roles': user['roles']}\n )\n else:\n for user in mdb.system.users.find():\n output.append(\n {'user': user['user'],\n 'readOnly': user.get('readOnly', 'None')}\n )\n return output\n\n except pymongo.errors.PyMongoError as err:\n log.error('Listing users failed with error: %s', err)\n return six.text_type(err)\n" ]
# -*- coding: utf-8 -*- ''' Module to provide MongoDB functionality to Salt :configuration: This module uses PyMongo, and accepts configuration details as parameters as well as configuration settings:: mongodb.host: 'localhost' mongodb.port: 27017 mongodb.user: '' mongodb.password: '' This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import re # Import salt libs import salt.utils.json from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import get_error_message as _get_error_message # Import third party libs from salt.ext import six try: import pymongo HAS_MONGODB = True except ImportError: HAS_MONGODB = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if pymongo is installed ''' if HAS_MONGODB: return 'mongodb' else: return (False, 'The mongodb execution module cannot be loaded: the pymongo library is not available.') def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Returns a tuple of (user, host, port) with config, pillar, or default values assigned to missing values. 
''' if not user: user = __salt__['config.option']('mongodb.user') if not password: password = __salt__['config.option']('mongodb.password') if not host: host = __salt__['config.option']('mongodb.host') if not port: port = __salt__['config.option']('mongodb.port') if not authdb: authdb = database try: conn = pymongo.MongoClient(host=host, port=port) mdb = pymongo.database.Database(conn, database) if user and password: mdb.authenticate(user, password, source=authdb) except pymongo.errors.PyMongoError: log.error('Error connecting to database %s', database) return False return conn def _to_dict(objects): ''' Potentially interprets a string as JSON for usage with mongo ''' try: if isinstance(objects, six.string_types): objects = salt.utils.json.loads(objects) except ValueError as err: log.error("Could not parse objects: %s", err) raise err return objects def db_list(user=None, password=None, host=None, port=None, authdb=None): ''' List all MongoDB databases CLI Example: .. code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing databases') return conn.database_names() except pymongo.errors.PyMongoError as err: log.error(err) return six.text_type(err) def db_exists(name, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a database exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.db_exists <name> <user> <password> <host> <port> ''' dbs = db_list(user, password, host, port, authdb=authdb) if isinstance(dbs, six.string_types): return False return name in dbs def db_remove(name, user=None, password=None, host=None, port=None, authdb=None): ''' Remove a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.db_remove <name> <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing database %s', name) conn.drop_database(name) except pymongo.errors.PyMongoError as err: log.error('Removing database %s failed with error: %s', name, err) return six.text_type(err) return True def _version(mdb): return mdb.command('buildInfo')['version'] def version(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get MongoDB instance version CLI Example: .. code-block:: bash salt '*' mongodb.version <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) try: mdb = pymongo.database.Database(conn, database) return _version(mdb) except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_find(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get single user from MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) mdb = pymongo.database.Database(conn, database) try: return mdb.command("usersInfo", name)["users"] except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return (False, six.text_type(err)) def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' List users of a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.user_list <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing users') mdb = pymongo.database.Database(conn, database) output = [] mongodb_version = _version(mdb) if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'): for user in mdb.command('usersInfo')['users']: output.append( {'user': user['user'], 'roles': user['roles']} ) else: for user in mdb.system.users.find(): output.append( {'user': user['user'], 'readOnly': user.get('readOnly', 'None')} ) return output except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_create(name, passwd, user=None, password=None, host=None, port=None, database='admin', authdb=None, roles=None): ''' Create a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' if not roles: roles = [] try: log.info('Creating user %s', name) mdb = pymongo.database.Database(conn, database) mdb.add_user(name, passwd, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_remove(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Remove a MongoDB user CLI Example: .. 
code-block:: bash salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing user %s', name) mdb = pymongo.database.Database(conn, database) mdb.remove_user(name) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_roles_exists(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): for role in roles: # if the role was provided in the shortened form, we convert it to a long form if not isinstance(role, dict): role = {'role': role, 'db': database} if role not in dict(user).get('roles', []): return False return True return False def user_grant_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Grant one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Granting roles %s to user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("grantRolesToUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Granting roles %s to user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def user_revoke_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Revoke one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_revoke_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_revoke_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Revoking roles %s from user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("revokeRolesFromUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Revoking roles %s from user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def insert(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Insert an object or list of objects into a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" try: objects = _to_dict(objects) except Exception as err: return err try: log.info("Inserting %r into %s.%s", objects, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.insert(objects) return ids except pymongo.errors.PyMongoError as err: log.error("Inserting objects %r failed with error %s", objects, err) return err def update_one(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Update an object into a collection http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' mongodb.update_one '{"_id": "my_minion"} {"bar": "BAR"}' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" objects = six.text_type(objects) objs = re.split(r'}\s+{', objects) if len(objs) is not 2: return "Your request does not contain a valid " + \ "'{_\"id\": \"my_id\"} {\"my_doc\": \"my_val\"}'" objs[0] = objs[0] + '}' objs[1] = '{' + objs[1] document = [] for obj in objs: try: obj = _to_dict(obj) document.append(obj) except Exception as err: return err _id_field = document[0] _update_doc = document[1] # need a string to perform the test, so using objs[0] test_f = find(collection, objs[0], user, password, host, port, database, authdb) if not isinstance(test_f, list): return 'The find result is not well formatted. An error appears; cannot update.' elif not test_f: return 'Did not find any result. You should try an insert before.' 
elif len(test_f) > 1: return 'Too many results. Please try to be more specific.' else: try: log.info("Updating %r into %s.%s", _id_field, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.update_one(_id_field, {'$set': _update_doc}) nb_mod = ids.modified_count return "{0} objects updated".format(nb_mod) except pymongo.errors.PyMongoError as err: log.error('Updating object %s failed with error %s', objects, err) return err def find(collection, query=None, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Find an object or list of objects in a collection CLI Example: .. code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err def remove(collection, query=None, user=None, password=None, host=None, port=None, database='admin', w=1, authdb=None): ''' Remove an object or list of objects into a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.remove mycollection '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return _get_error_message(err) try: log.info("Removing %r from %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.remove(query, w=w) return "{0} objects removed".format(ret['n']) except pymongo.errors.PyMongoError as err: log.error("Removing objects failed with error: %s", _get_error_message(err)) return _get_error_message(err)
saltstack/salt
salt/modules/mongodb.py
user_create
python
def user_create(name, passwd, user=None, password=None, host=None, port=None, database='admin', authdb=None, roles=None):
    '''
    Create a MongoDB user

    name
        Name of the user to create.

    passwd
        Password to assign to the new user.

    user, password, host, port
        Connection credentials for the MongoDB instance (fall back to the
        ``mongodb.*`` config options inside ``_connect``).

    database
        Database in which the user is created. Default: ``admin``.

    authdb
        Database to authenticate the connection against (defaults to
        ``database`` inside ``_connect``).

    roles
        Optional list of roles to grant; an empty list (no roles) is used
        when omitted, since ``add_user`` rejects ``None``.

    Returns ``True`` on success, or an error-message string on failure.

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database>
    '''
    conn = _connect(user, password, host, port, authdb=authdb)
    if not conn:
        return 'Failed to connect to mongo database'

    if not roles:
        roles = []

    try:
        log.info('Creating user %s', name)
        mdb = pymongo.database.Database(conn, database)
        mdb.add_user(name, passwd, roles=roles)
    except pymongo.errors.PyMongoError as err:
        # Log the user name, not a database name: this path creates a user.
        log.error('Creating user %s failed with error: %s', name, err)
        return six.text_type(err)
    return True
Create a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mongodb.py#L271-L296
[ "def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None):\n '''\n Returns a tuple of (user, host, port) with config, pillar, or default\n values assigned to missing values.\n '''\n if not user:\n user = __salt__['config.option']('mongodb.user')\n if not password:\n password = __salt__['config.option']('mongodb.password')\n if not host:\n host = __salt__['config.option']('mongodb.host')\n if not port:\n port = __salt__['config.option']('mongodb.port')\n if not authdb:\n authdb = database\n\n try:\n conn = pymongo.MongoClient(host=host, port=port)\n mdb = pymongo.database.Database(conn, database)\n if user and password:\n mdb.authenticate(user, password, source=authdb)\n except pymongo.errors.PyMongoError:\n log.error('Error connecting to database %s', database)\n return False\n\n return conn\n" ]
# -*- coding: utf-8 -*- ''' Module to provide MongoDB functionality to Salt :configuration: This module uses PyMongo, and accepts configuration details as parameters as well as configuration settings:: mongodb.host: 'localhost' mongodb.port: 27017 mongodb.user: '' mongodb.password: '' This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import re # Import salt libs import salt.utils.json from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import get_error_message as _get_error_message # Import third party libs from salt.ext import six try: import pymongo HAS_MONGODB = True except ImportError: HAS_MONGODB = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if pymongo is installed ''' if HAS_MONGODB: return 'mongodb' else: return (False, 'The mongodb execution module cannot be loaded: the pymongo library is not available.') def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Returns a tuple of (user, host, port) with config, pillar, or default values assigned to missing values. 
''' if not user: user = __salt__['config.option']('mongodb.user') if not password: password = __salt__['config.option']('mongodb.password') if not host: host = __salt__['config.option']('mongodb.host') if not port: port = __salt__['config.option']('mongodb.port') if not authdb: authdb = database try: conn = pymongo.MongoClient(host=host, port=port) mdb = pymongo.database.Database(conn, database) if user and password: mdb.authenticate(user, password, source=authdb) except pymongo.errors.PyMongoError: log.error('Error connecting to database %s', database) return False return conn def _to_dict(objects): ''' Potentially interprets a string as JSON for usage with mongo ''' try: if isinstance(objects, six.string_types): objects = salt.utils.json.loads(objects) except ValueError as err: log.error("Could not parse objects: %s", err) raise err return objects def db_list(user=None, password=None, host=None, port=None, authdb=None): ''' List all MongoDB databases CLI Example: .. code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing databases') return conn.database_names() except pymongo.errors.PyMongoError as err: log.error(err) return six.text_type(err) def db_exists(name, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a database exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.db_exists <name> <user> <password> <host> <port> ''' dbs = db_list(user, password, host, port, authdb=authdb) if isinstance(dbs, six.string_types): return False return name in dbs def db_remove(name, user=None, password=None, host=None, port=None, authdb=None): ''' Remove a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.db_remove <name> <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing database %s', name) conn.drop_database(name) except pymongo.errors.PyMongoError as err: log.error('Removing database %s failed with error: %s', name, err) return six.text_type(err) return True def _version(mdb): return mdb.command('buildInfo')['version'] def version(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get MongoDB instance version CLI Example: .. code-block:: bash salt '*' mongodb.version <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) try: mdb = pymongo.database.Database(conn, database) return _version(mdb) except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_find(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get single user from MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) mdb = pymongo.database.Database(conn, database) try: return mdb.command("usersInfo", name)["users"] except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return (False, six.text_type(err)) def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' List users of a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.user_list <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing users') mdb = pymongo.database.Database(conn, database) output = [] mongodb_version = _version(mdb) if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'): for user in mdb.command('usersInfo')['users']: output.append( {'user': user['user'], 'roles': user['roles']} ) else: for user in mdb.system.users.find(): output.append( {'user': user['user'], 'readOnly': user.get('readOnly', 'None')} ) return output except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_exists(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Checks if a user exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database> ''' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): return True return False def user_remove(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Remove a MongoDB user CLI Example: .. 
code-block:: bash salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing user %s', name) mdb = pymongo.database.Database(conn, database) mdb.remove_user(name) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_roles_exists(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): for role in roles: # if the role was provided in the shortened form, we convert it to a long form if not isinstance(role, dict): role = {'role': role, 'db': database} if role not in dict(user).get('roles', []): return False return True return False def user_grant_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Grant one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Granting roles %s to user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("grantRolesToUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Granting roles %s to user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def user_revoke_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Revoke one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_revoke_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_revoke_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Revoking roles %s from user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("revokeRolesFromUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Revoking roles %s from user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def insert(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Insert an object or list of objects into a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" try: objects = _to_dict(objects) except Exception as err: return err try: log.info("Inserting %r into %s.%s", objects, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.insert(objects) return ids except pymongo.errors.PyMongoError as err: log.error("Inserting objects %r failed with error %s", objects, err) return err def update_one(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Update an object into a collection http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' mongodb.update_one '{"_id": "my_minion"} {"bar": "BAR"}' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" objects = six.text_type(objects) objs = re.split(r'}\s+{', objects) if len(objs) is not 2: return "Your request does not contain a valid " + \ "'{_\"id\": \"my_id\"} {\"my_doc\": \"my_val\"}'" objs[0] = objs[0] + '}' objs[1] = '{' + objs[1] document = [] for obj in objs: try: obj = _to_dict(obj) document.append(obj) except Exception as err: return err _id_field = document[0] _update_doc = document[1] # need a string to perform the test, so using objs[0] test_f = find(collection, objs[0], user, password, host, port, database, authdb) if not isinstance(test_f, list): return 'The find result is not well formatted. An error appears; cannot update.' elif not test_f: return 'Did not find any result. You should try an insert before.' 
elif len(test_f) > 1: return 'Too many results. Please try to be more specific.' else: try: log.info("Updating %r into %s.%s", _id_field, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.update_one(_id_field, {'$set': _update_doc}) nb_mod = ids.modified_count return "{0} objects updated".format(nb_mod) except pymongo.errors.PyMongoError as err: log.error('Updating object %s failed with error %s', objects, err) return err def find(collection, query=None, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Find an object or list of objects in a collection CLI Example: .. code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err def remove(collection, query=None, user=None, password=None, host=None, port=None, database='admin', w=1, authdb=None): ''' Remove an object or list of objects into a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.remove mycollection '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return _get_error_message(err) try: log.info("Removing %r from %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.remove(query, w=w) return "{0} objects removed".format(ret['n']) except pymongo.errors.PyMongoError as err: log.error("Removing objects failed with error: %s", _get_error_message(err)) return _get_error_message(err)
saltstack/salt
salt/modules/mongodb.py
user_remove
python
def user_remove(name, user=None, password=None, host=None, port=None, database='admin', authdb=None):
    '''
    Remove a MongoDB user

    name
        Name of the user to remove.

    user, password, host, port
        Connection credentials for the MongoDB instance (fall back to the
        ``mongodb.*`` config options inside ``_connect``).

    database
        Database the user is removed from. Default: ``admin``.

    authdb
        Database to authenticate the connection against (defaults to
        ``database`` inside ``_connect``).

    Returns ``True`` on success, or an error-message string on failure.

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database>
    '''
    # Pass authdb through to _connect; previously it was accepted but
    # silently ignored, breaking auth against a non-default auth database.
    conn = _connect(user, password, host, port, authdb=authdb)
    if not conn:
        return 'Failed to connect to mongo database'

    try:
        log.info('Removing user %s', name)
        mdb = pymongo.database.Database(conn, database)
        mdb.remove_user(name)
    except pymongo.errors.PyMongoError as err:
        # Log the actual operation: removing a user, not creating a database.
        log.error('Removing user %s failed with error: %s', name, err)
        return six.text_type(err)
    return True
Remove a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mongodb.py#L299-L322
[ "def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None):\n '''\n Returns a tuple of (user, host, port) with config, pillar, or default\n values assigned to missing values.\n '''\n if not user:\n user = __salt__['config.option']('mongodb.user')\n if not password:\n password = __salt__['config.option']('mongodb.password')\n if not host:\n host = __salt__['config.option']('mongodb.host')\n if not port:\n port = __salt__['config.option']('mongodb.port')\n if not authdb:\n authdb = database\n\n try:\n conn = pymongo.MongoClient(host=host, port=port)\n mdb = pymongo.database.Database(conn, database)\n if user and password:\n mdb.authenticate(user, password, source=authdb)\n except pymongo.errors.PyMongoError:\n log.error('Error connecting to database %s', database)\n return False\n\n return conn\n" ]
# -*- coding: utf-8 -*- ''' Module to provide MongoDB functionality to Salt :configuration: This module uses PyMongo, and accepts configuration details as parameters as well as configuration settings:: mongodb.host: 'localhost' mongodb.port: 27017 mongodb.user: '' mongodb.password: '' This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import re # Import salt libs import salt.utils.json from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import get_error_message as _get_error_message # Import third party libs from salt.ext import six try: import pymongo HAS_MONGODB = True except ImportError: HAS_MONGODB = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if pymongo is installed ''' if HAS_MONGODB: return 'mongodb' else: return (False, 'The mongodb execution module cannot be loaded: the pymongo library is not available.') def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Returns a tuple of (user, host, port) with config, pillar, or default values assigned to missing values. 
''' if not user: user = __salt__['config.option']('mongodb.user') if not password: password = __salt__['config.option']('mongodb.password') if not host: host = __salt__['config.option']('mongodb.host') if not port: port = __salt__['config.option']('mongodb.port') if not authdb: authdb = database try: conn = pymongo.MongoClient(host=host, port=port) mdb = pymongo.database.Database(conn, database) if user and password: mdb.authenticate(user, password, source=authdb) except pymongo.errors.PyMongoError: log.error('Error connecting to database %s', database) return False return conn def _to_dict(objects): ''' Potentially interprets a string as JSON for usage with mongo ''' try: if isinstance(objects, six.string_types): objects = salt.utils.json.loads(objects) except ValueError as err: log.error("Could not parse objects: %s", err) raise err return objects def db_list(user=None, password=None, host=None, port=None, authdb=None): ''' List all MongoDB databases CLI Example: .. code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing databases') return conn.database_names() except pymongo.errors.PyMongoError as err: log.error(err) return six.text_type(err) def db_exists(name, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a database exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.db_exists <name> <user> <password> <host> <port> ''' dbs = db_list(user, password, host, port, authdb=authdb) if isinstance(dbs, six.string_types): return False return name in dbs def db_remove(name, user=None, password=None, host=None, port=None, authdb=None): ''' Remove a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.db_remove <name> <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing database %s', name) conn.drop_database(name) except pymongo.errors.PyMongoError as err: log.error('Removing database %s failed with error: %s', name, err) return six.text_type(err) return True def _version(mdb): return mdb.command('buildInfo')['version'] def version(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get MongoDB instance version CLI Example: .. code-block:: bash salt '*' mongodb.version <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) try: mdb = pymongo.database.Database(conn, database) return _version(mdb) except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_find(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get single user from MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) mdb = pymongo.database.Database(conn, database) try: return mdb.command("usersInfo", name)["users"] except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return (False, six.text_type(err)) def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' List users of a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.user_list <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing users') mdb = pymongo.database.Database(conn, database) output = [] mongodb_version = _version(mdb) if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'): for user in mdb.command('usersInfo')['users']: output.append( {'user': user['user'], 'roles': user['roles']} ) else: for user in mdb.system.users.find(): output.append( {'user': user['user'], 'readOnly': user.get('readOnly', 'None')} ) return output except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_exists(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Checks if a user exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database> ''' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): return True return False def user_create(name, passwd, user=None, password=None, host=None, port=None, database='admin', authdb=None, roles=None): ''' Create a MongoDB user CLI Example: .. 
code-block:: bash salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' if not roles: roles = [] try: log.info('Creating user %s', name) mdb = pymongo.database.Database(conn, database) mdb.add_user(name, passwd, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_roles_exists(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): for role in roles: # if the role was provided in the shortened form, we convert it to a long form if not isinstance(role, dict): role = {'role': role, 'db': database} if role not in dict(user).get('roles', []): return False return True return False def user_grant_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Grant one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Granting roles %s to user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("grantRolesToUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Granting roles %s to user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def user_revoke_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Revoke one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_revoke_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_revoke_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Revoking roles %s from user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("revokeRolesFromUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Revoking roles %s from user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def insert(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Insert an object or list of objects into a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" try: objects = _to_dict(objects) except Exception as err: return err try: log.info("Inserting %r into %s.%s", objects, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.insert(objects) return ids except pymongo.errors.PyMongoError as err: log.error("Inserting objects %r failed with error %s", objects, err) return err def update_one(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Update an object into a collection http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' mongodb.update_one '{"_id": "my_minion"} {"bar": "BAR"}' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" objects = six.text_type(objects) objs = re.split(r'}\s+{', objects) if len(objs) is not 2: return "Your request does not contain a valid " + \ "'{_\"id\": \"my_id\"} {\"my_doc\": \"my_val\"}'" objs[0] = objs[0] + '}' objs[1] = '{' + objs[1] document = [] for obj in objs: try: obj = _to_dict(obj) document.append(obj) except Exception as err: return err _id_field = document[0] _update_doc = document[1] # need a string to perform the test, so using objs[0] test_f = find(collection, objs[0], user, password, host, port, database, authdb) if not isinstance(test_f, list): return 'The find result is not well formatted. An error appears; cannot update.' elif not test_f: return 'Did not find any result. You should try an insert before.' 
elif len(test_f) > 1: return 'Too many results. Please try to be more specific.' else: try: log.info("Updating %r into %s.%s", _id_field, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.update_one(_id_field, {'$set': _update_doc}) nb_mod = ids.modified_count return "{0} objects updated".format(nb_mod) except pymongo.errors.PyMongoError as err: log.error('Updating object %s failed with error %s', objects, err) return err def find(collection, query=None, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Find an object or list of objects in a collection CLI Example: .. code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err def remove(collection, query=None, user=None, password=None, host=None, port=None, database='admin', w=1, authdb=None): ''' Remove an object or list of objects into a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.remove mycollection '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return _get_error_message(err) try: log.info("Removing %r from %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.remove(query, w=w) return "{0} objects removed".format(ret['n']) except pymongo.errors.PyMongoError as err: log.error("Removing objects failed with error: %s", _get_error_message(err)) return _get_error_message(err)
saltstack/salt
salt/modules/mongodb.py
user_roles_exists
python
def user_roles_exists(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): for role in roles: # if the role was provided in the shortened form, we convert it to a long form if not isinstance(role, dict): role = {'role': role, 'db': database} if role not in dict(user).get('roles', []): return False return True return False
Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mongodb.py#L325-L360
[ "def _to_dict(objects):\n '''\n Potentially interprets a string as JSON for usage with mongo\n '''\n try:\n if isinstance(objects, six.string_types):\n objects = salt.utils.json.loads(objects)\n except ValueError as err:\n log.error(\"Could not parse objects: %s\", err)\n raise err\n\n return objects\n", "def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None):\n '''\n List users of a MongoDB database\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' mongodb.user_list <user> <password> <host> <port> <database>\n '''\n conn = _connect(user, password, host, port, authdb=authdb)\n if not conn:\n return 'Failed to connect to mongo database'\n\n try:\n log.info('Listing users')\n mdb = pymongo.database.Database(conn, database)\n\n output = []\n mongodb_version = _version(mdb)\n\n if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'):\n for user in mdb.command('usersInfo')['users']:\n output.append(\n {'user': user['user'],\n 'roles': user['roles']}\n )\n else:\n for user in mdb.system.users.find():\n output.append(\n {'user': user['user'],\n 'readOnly': user.get('readOnly', 'None')}\n )\n return output\n\n except pymongo.errors.PyMongoError as err:\n log.error('Listing users failed with error: %s', err)\n return six.text_type(err)\n" ]
# -*- coding: utf-8 -*- ''' Module to provide MongoDB functionality to Salt :configuration: This module uses PyMongo, and accepts configuration details as parameters as well as configuration settings:: mongodb.host: 'localhost' mongodb.port: 27017 mongodb.user: '' mongodb.password: '' This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import re # Import salt libs import salt.utils.json from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import get_error_message as _get_error_message # Import third party libs from salt.ext import six try: import pymongo HAS_MONGODB = True except ImportError: HAS_MONGODB = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if pymongo is installed ''' if HAS_MONGODB: return 'mongodb' else: return (False, 'The mongodb execution module cannot be loaded: the pymongo library is not available.') def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Returns a tuple of (user, host, port) with config, pillar, or default values assigned to missing values. 
''' if not user: user = __salt__['config.option']('mongodb.user') if not password: password = __salt__['config.option']('mongodb.password') if not host: host = __salt__['config.option']('mongodb.host') if not port: port = __salt__['config.option']('mongodb.port') if not authdb: authdb = database try: conn = pymongo.MongoClient(host=host, port=port) mdb = pymongo.database.Database(conn, database) if user and password: mdb.authenticate(user, password, source=authdb) except pymongo.errors.PyMongoError: log.error('Error connecting to database %s', database) return False return conn def _to_dict(objects): ''' Potentially interprets a string as JSON for usage with mongo ''' try: if isinstance(objects, six.string_types): objects = salt.utils.json.loads(objects) except ValueError as err: log.error("Could not parse objects: %s", err) raise err return objects def db_list(user=None, password=None, host=None, port=None, authdb=None): ''' List all MongoDB databases CLI Example: .. code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing databases') return conn.database_names() except pymongo.errors.PyMongoError as err: log.error(err) return six.text_type(err) def db_exists(name, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a database exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.db_exists <name> <user> <password> <host> <port> ''' dbs = db_list(user, password, host, port, authdb=authdb) if isinstance(dbs, six.string_types): return False return name in dbs def db_remove(name, user=None, password=None, host=None, port=None, authdb=None): ''' Remove a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.db_remove <name> <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing database %s', name) conn.drop_database(name) except pymongo.errors.PyMongoError as err: log.error('Removing database %s failed with error: %s', name, err) return six.text_type(err) return True def _version(mdb): return mdb.command('buildInfo')['version'] def version(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get MongoDB instance version CLI Example: .. code-block:: bash salt '*' mongodb.version <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) try: mdb = pymongo.database.Database(conn, database) return _version(mdb) except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_find(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get single user from MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) mdb = pymongo.database.Database(conn, database) try: return mdb.command("usersInfo", name)["users"] except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return (False, six.text_type(err)) def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' List users of a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.user_list <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing users') mdb = pymongo.database.Database(conn, database) output = [] mongodb_version = _version(mdb) if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'): for user in mdb.command('usersInfo')['users']: output.append( {'user': user['user'], 'roles': user['roles']} ) else: for user in mdb.system.users.find(): output.append( {'user': user['user'], 'readOnly': user.get('readOnly', 'None')} ) return output except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_exists(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Checks if a user exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database> ''' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): return True return False def user_create(name, passwd, user=None, password=None, host=None, port=None, database='admin', authdb=None, roles=None): ''' Create a MongoDB user CLI Example: .. 
code-block:: bash salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' if not roles: roles = [] try: log.info('Creating user %s', name) mdb = pymongo.database.Database(conn, database) mdb.add_user(name, passwd, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_remove(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Remove a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing user %s', name) mdb = pymongo.database.Database(conn, database) mdb.remove_user(name) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_grant_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Grant one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Granting roles %s to user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("grantRolesToUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Granting roles %s to user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def user_revoke_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Revoke one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_revoke_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_revoke_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Revoking roles %s from user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("revokeRolesFromUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Revoking roles %s from user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def insert(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Insert an object or list of objects into a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" try: objects = _to_dict(objects) except Exception as err: return err try: log.info("Inserting %r into %s.%s", objects, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.insert(objects) return ids except pymongo.errors.PyMongoError as err: log.error("Inserting objects %r failed with error %s", objects, err) return err def update_one(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Update an object into a collection http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' mongodb.update_one '{"_id": "my_minion"} {"bar": "BAR"}' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" objects = six.text_type(objects) objs = re.split(r'}\s+{', objects) if len(objs) is not 2: return "Your request does not contain a valid " + \ "'{_\"id\": \"my_id\"} {\"my_doc\": \"my_val\"}'" objs[0] = objs[0] + '}' objs[1] = '{' + objs[1] document = [] for obj in objs: try: obj = _to_dict(obj) document.append(obj) except Exception as err: return err _id_field = document[0] _update_doc = document[1] # need a string to perform the test, so using objs[0] test_f = find(collection, objs[0], user, password, host, port, database, authdb) if not isinstance(test_f, list): return 'The find result is not well formatted. An error appears; cannot update.' elif not test_f: return 'Did not find any result. You should try an insert before.' 
elif len(test_f) > 1: return 'Too many results. Please try to be more specific.' else: try: log.info("Updating %r into %s.%s", _id_field, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.update_one(_id_field, {'$set': _update_doc}) nb_mod = ids.modified_count return "{0} objects updated".format(nb_mod) except pymongo.errors.PyMongoError as err: log.error('Updating object %s failed with error %s', objects, err) return err def find(collection, query=None, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Find an object or list of objects in a collection CLI Example: .. code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err def remove(collection, query=None, user=None, password=None, host=None, port=None, database='admin', w=1, authdb=None): ''' Remove an object or list of objects into a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.remove mycollection '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return _get_error_message(err) try: log.info("Removing %r from %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.remove(query, w=w) return "{0} objects removed".format(ret['n']) except pymongo.errors.PyMongoError as err: log.error("Removing objects failed with error: %s", _get_error_message(err)) return _get_error_message(err)
saltstack/salt
salt/modules/mongodb.py
user_grant_roles
python
def user_grant_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Grant one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Granting roles %s to user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("grantRolesToUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Granting roles %s to user %s failed with error: %s', roles, name, err) return six.text_type(err) return True
Grant one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mongodb.py#L363-L395
[ "def _to_dict(objects):\n '''\n Potentially interprets a string as JSON for usage with mongo\n '''\n try:\n if isinstance(objects, six.string_types):\n objects = salt.utils.json.loads(objects)\n except ValueError as err:\n log.error(\"Could not parse objects: %s\", err)\n raise err\n\n return objects\n", "def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None):\n '''\n Returns a tuple of (user, host, port) with config, pillar, or default\n values assigned to missing values.\n '''\n if not user:\n user = __salt__['config.option']('mongodb.user')\n if not password:\n password = __salt__['config.option']('mongodb.password')\n if not host:\n host = __salt__['config.option']('mongodb.host')\n if not port:\n port = __salt__['config.option']('mongodb.port')\n if not authdb:\n authdb = database\n\n try:\n conn = pymongo.MongoClient(host=host, port=port)\n mdb = pymongo.database.Database(conn, database)\n if user and password:\n mdb.authenticate(user, password, source=authdb)\n except pymongo.errors.PyMongoError:\n log.error('Error connecting to database %s', database)\n return False\n\n return conn\n" ]
# -*- coding: utf-8 -*- ''' Module to provide MongoDB functionality to Salt :configuration: This module uses PyMongo, and accepts configuration details as parameters as well as configuration settings:: mongodb.host: 'localhost' mongodb.port: 27017 mongodb.user: '' mongodb.password: '' This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import re # Import salt libs import salt.utils.json from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import get_error_message as _get_error_message # Import third party libs from salt.ext import six try: import pymongo HAS_MONGODB = True except ImportError: HAS_MONGODB = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if pymongo is installed ''' if HAS_MONGODB: return 'mongodb' else: return (False, 'The mongodb execution module cannot be loaded: the pymongo library is not available.') def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Returns a tuple of (user, host, port) with config, pillar, or default values assigned to missing values. 
''' if not user: user = __salt__['config.option']('mongodb.user') if not password: password = __salt__['config.option']('mongodb.password') if not host: host = __salt__['config.option']('mongodb.host') if not port: port = __salt__['config.option']('mongodb.port') if not authdb: authdb = database try: conn = pymongo.MongoClient(host=host, port=port) mdb = pymongo.database.Database(conn, database) if user and password: mdb.authenticate(user, password, source=authdb) except pymongo.errors.PyMongoError: log.error('Error connecting to database %s', database) return False return conn def _to_dict(objects): ''' Potentially interprets a string as JSON for usage with mongo ''' try: if isinstance(objects, six.string_types): objects = salt.utils.json.loads(objects) except ValueError as err: log.error("Could not parse objects: %s", err) raise err return objects def db_list(user=None, password=None, host=None, port=None, authdb=None): ''' List all MongoDB databases CLI Example: .. code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing databases') return conn.database_names() except pymongo.errors.PyMongoError as err: log.error(err) return six.text_type(err) def db_exists(name, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a database exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.db_exists <name> <user> <password> <host> <port> ''' dbs = db_list(user, password, host, port, authdb=authdb) if isinstance(dbs, six.string_types): return False return name in dbs def db_remove(name, user=None, password=None, host=None, port=None, authdb=None): ''' Remove a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.db_remove <name> <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing database %s', name) conn.drop_database(name) except pymongo.errors.PyMongoError as err: log.error('Removing database %s failed with error: %s', name, err) return six.text_type(err) return True def _version(mdb): return mdb.command('buildInfo')['version'] def version(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get MongoDB instance version CLI Example: .. code-block:: bash salt '*' mongodb.version <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) try: mdb = pymongo.database.Database(conn, database) return _version(mdb) except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_find(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get single user from MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) mdb = pymongo.database.Database(conn, database) try: return mdb.command("usersInfo", name)["users"] except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return (False, six.text_type(err)) def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' List users of a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.user_list <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing users') mdb = pymongo.database.Database(conn, database) output = [] mongodb_version = _version(mdb) if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'): for user in mdb.command('usersInfo')['users']: output.append( {'user': user['user'], 'roles': user['roles']} ) else: for user in mdb.system.users.find(): output.append( {'user': user['user'], 'readOnly': user.get('readOnly', 'None')} ) return output except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_exists(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Checks if a user exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database> ''' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): return True return False def user_create(name, passwd, user=None, password=None, host=None, port=None, database='admin', authdb=None, roles=None): ''' Create a MongoDB user CLI Example: .. 
code-block:: bash salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' if not roles: roles = [] try: log.info('Creating user %s', name) mdb = pymongo.database.Database(conn, database) mdb.add_user(name, passwd, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_remove(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Remove a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing user %s', name) mdb = pymongo.database.Database(conn, database) mdb.remove_user(name) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_roles_exists(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): for role in roles: # if the role was provided in the shortened form, we convert it to a long form if not isinstance(role, dict): role = {'role': role, 'db': database} if role not in dict(user).get('roles', []): return False return True return False def user_revoke_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Revoke one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_revoke_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_revoke_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Revoking roles %s from user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("revokeRolesFromUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Revoking roles %s from user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def insert(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Insert an object or list of objects into a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" try: objects = _to_dict(objects) except Exception as err: return err try: log.info("Inserting %r into %s.%s", objects, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.insert(objects) return ids except pymongo.errors.PyMongoError as err: log.error("Inserting objects %r failed with error %s", objects, err) return err def update_one(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Update an object into a collection http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' mongodb.update_one '{"_id": "my_minion"} {"bar": "BAR"}' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" objects = six.text_type(objects) objs = re.split(r'}\s+{', objects) if len(objs) is not 2: return "Your request does not contain a valid " + \ "'{_\"id\": \"my_id\"} {\"my_doc\": \"my_val\"}'" objs[0] = objs[0] + '}' objs[1] = '{' + objs[1] document = [] for obj in objs: try: obj = _to_dict(obj) document.append(obj) except Exception as err: return err _id_field = document[0] _update_doc = document[1] # need a string to perform the test, so using objs[0] test_f = find(collection, objs[0], user, password, host, port, database, authdb) if not isinstance(test_f, list): return 'The find result is not well formatted. An error appears; cannot update.' elif not test_f: return 'Did not find any result. You should try an insert before.' 
elif len(test_f) > 1: return 'Too many results. Please try to be more specific.' else: try: log.info("Updating %r into %s.%s", _id_field, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.update_one(_id_field, {'$set': _update_doc}) nb_mod = ids.modified_count return "{0} objects updated".format(nb_mod) except pymongo.errors.PyMongoError as err: log.error('Updating object %s failed with error %s', objects, err) return err def find(collection, query=None, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Find an object or list of objects in a collection CLI Example: .. code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err def remove(collection, query=None, user=None, password=None, host=None, port=None, database='admin', w=1, authdb=None): ''' Remove an object or list of objects into a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.remove mycollection '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return _get_error_message(err) try: log.info("Removing %r from %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.remove(query, w=w) return "{0} objects removed".format(ret['n']) except pymongo.errors.PyMongoError as err: log.error("Removing objects failed with error: %s", _get_error_message(err)) return _get_error_message(err)
saltstack/salt
salt/modules/mongodb.py
insert
python
def insert(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Insert an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" try: objects = _to_dict(objects) except Exception as err: return err try: log.info("Inserting %r into %s.%s", objects, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.insert(objects) return ids except pymongo.errors.PyMongoError as err: log.error("Inserting objects %r failed with error %s", objects, err) return err
Insert an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mongodb.py#L433-L462
[ "def _to_dict(objects):\n '''\n Potentially interprets a string as JSON for usage with mongo\n '''\n try:\n if isinstance(objects, six.string_types):\n objects = salt.utils.json.loads(objects)\n except ValueError as err:\n log.error(\"Could not parse objects: %s\", err)\n raise err\n\n return objects\n", "def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None):\n '''\n Returns a tuple of (user, host, port) with config, pillar, or default\n values assigned to missing values.\n '''\n if not user:\n user = __salt__['config.option']('mongodb.user')\n if not password:\n password = __salt__['config.option']('mongodb.password')\n if not host:\n host = __salt__['config.option']('mongodb.host')\n if not port:\n port = __salt__['config.option']('mongodb.port')\n if not authdb:\n authdb = database\n\n try:\n conn = pymongo.MongoClient(host=host, port=port)\n mdb = pymongo.database.Database(conn, database)\n if user and password:\n mdb.authenticate(user, password, source=authdb)\n except pymongo.errors.PyMongoError:\n log.error('Error connecting to database %s', database)\n return False\n\n return conn\n" ]
# -*- coding: utf-8 -*- ''' Module to provide MongoDB functionality to Salt :configuration: This module uses PyMongo, and accepts configuration details as parameters as well as configuration settings:: mongodb.host: 'localhost' mongodb.port: 27017 mongodb.user: '' mongodb.password: '' This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import re # Import salt libs import salt.utils.json from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import get_error_message as _get_error_message # Import third party libs from salt.ext import six try: import pymongo HAS_MONGODB = True except ImportError: HAS_MONGODB = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if pymongo is installed ''' if HAS_MONGODB: return 'mongodb' else: return (False, 'The mongodb execution module cannot be loaded: the pymongo library is not available.') def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Returns a tuple of (user, host, port) with config, pillar, or default values assigned to missing values. 
''' if not user: user = __salt__['config.option']('mongodb.user') if not password: password = __salt__['config.option']('mongodb.password') if not host: host = __salt__['config.option']('mongodb.host') if not port: port = __salt__['config.option']('mongodb.port') if not authdb: authdb = database try: conn = pymongo.MongoClient(host=host, port=port) mdb = pymongo.database.Database(conn, database) if user and password: mdb.authenticate(user, password, source=authdb) except pymongo.errors.PyMongoError: log.error('Error connecting to database %s', database) return False return conn def _to_dict(objects): ''' Potentially interprets a string as JSON for usage with mongo ''' try: if isinstance(objects, six.string_types): objects = salt.utils.json.loads(objects) except ValueError as err: log.error("Could not parse objects: %s", err) raise err return objects def db_list(user=None, password=None, host=None, port=None, authdb=None): ''' List all MongoDB databases CLI Example: .. code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing databases') return conn.database_names() except pymongo.errors.PyMongoError as err: log.error(err) return six.text_type(err) def db_exists(name, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a database exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.db_exists <name> <user> <password> <host> <port> ''' dbs = db_list(user, password, host, port, authdb=authdb) if isinstance(dbs, six.string_types): return False return name in dbs def db_remove(name, user=None, password=None, host=None, port=None, authdb=None): ''' Remove a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.db_remove <name> <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing database %s', name) conn.drop_database(name) except pymongo.errors.PyMongoError as err: log.error('Removing database %s failed with error: %s', name, err) return six.text_type(err) return True def _version(mdb): return mdb.command('buildInfo')['version'] def version(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get MongoDB instance version CLI Example: .. code-block:: bash salt '*' mongodb.version <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) try: mdb = pymongo.database.Database(conn, database) return _version(mdb) except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_find(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get single user from MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) mdb = pymongo.database.Database(conn, database) try: return mdb.command("usersInfo", name)["users"] except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return (False, six.text_type(err)) def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' List users of a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.user_list <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing users') mdb = pymongo.database.Database(conn, database) output = [] mongodb_version = _version(mdb) if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'): for user in mdb.command('usersInfo')['users']: output.append( {'user': user['user'], 'roles': user['roles']} ) else: for user in mdb.system.users.find(): output.append( {'user': user['user'], 'readOnly': user.get('readOnly', 'None')} ) return output except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_exists(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Checks if a user exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database> ''' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): return True return False def user_create(name, passwd, user=None, password=None, host=None, port=None, database='admin', authdb=None, roles=None): ''' Create a MongoDB user CLI Example: .. 
code-block:: bash salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' if not roles: roles = [] try: log.info('Creating user %s', name) mdb = pymongo.database.Database(conn, database) mdb.add_user(name, passwd, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_remove(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Remove a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing user %s', name) mdb = pymongo.database.Database(conn, database) mdb.remove_user(name) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_roles_exists(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): for role in roles: # if the role was provided in the shortened form, we convert it to a long form if not isinstance(role, dict): role = {'role': role, 'db': database} if role not in dict(user).get('roles', []): return False return True return False def user_grant_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Grant one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Granting roles %s to user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("grantRolesToUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Granting roles %s to user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def user_revoke_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Revoke one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_revoke_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_revoke_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Revoking roles %s from user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("revokeRolesFromUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Revoking roles %s from user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def update_one(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Update an object into a collection http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' mongodb.update_one '{"_id": "my_minion"} {"bar": "BAR"}' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" objects = six.text_type(objects) objs = re.split(r'}\s+{', objects) if len(objs) is not 2: return "Your request does not contain a valid " + \ "'{_\"id\": \"my_id\"} {\"my_doc\": \"my_val\"}'" objs[0] = objs[0] + '}' objs[1] = '{' + objs[1] document = [] for obj in objs: try: obj = _to_dict(obj) document.append(obj) except Exception as err: return err _id_field = document[0] _update_doc = document[1] # need a string to perform the test, so using objs[0] test_f = find(collection, objs[0], user, password, host, port, database, authdb) if not isinstance(test_f, list): return 'The find result is not well formatted. An error appears; cannot update.' elif not test_f: return 'Did not find any result. 
You should try an insert before.' elif len(test_f) > 1: return 'Too many results. Please try to be more specific.' else: try: log.info("Updating %r into %s.%s", _id_field, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.update_one(_id_field, {'$set': _update_doc}) nb_mod = ids.modified_count return "{0} objects updated".format(nb_mod) except pymongo.errors.PyMongoError as err: log.error('Updating object %s failed with error %s', objects, err) return err def find(collection, query=None, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Find an object or list of objects in a collection CLI Example: .. code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err def remove(collection, query=None, user=None, password=None, host=None, port=None, database='admin', w=1, authdb=None): ''' Remove an object or list of objects into a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.remove mycollection '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return _get_error_message(err) try: log.info("Removing %r from %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.remove(query, w=w) return "{0} objects removed".format(ret['n']) except pymongo.errors.PyMongoError as err: log.error("Removing objects failed with error: %s", _get_error_message(err)) return _get_error_message(err)
saltstack/salt
salt/modules/mongodb.py
update_one
python
def update_one(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Update an object into a collection http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' mongodb.update_one '{"_id": "my_minion"} {"bar": "BAR"}' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" objects = six.text_type(objects) objs = re.split(r'}\s+{', objects) if len(objs) is not 2: return "Your request does not contain a valid " + \ "'{_\"id\": \"my_id\"} {\"my_doc\": \"my_val\"}'" objs[0] = objs[0] + '}' objs[1] = '{' + objs[1] document = [] for obj in objs: try: obj = _to_dict(obj) document.append(obj) except Exception as err: return err _id_field = document[0] _update_doc = document[1] # need a string to perform the test, so using objs[0] test_f = find(collection, objs[0], user, password, host, port, database, authdb) if not isinstance(test_f, list): return 'The find result is not well formatted. An error appears; cannot update.' elif not test_f: return 'Did not find any result. You should try an insert before.' elif len(test_f) > 1: return 'Too many results. Please try to be more specific.' else: try: log.info("Updating %r into %s.%s", _id_field, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.update_one(_id_field, {'$set': _update_doc}) nb_mod = ids.modified_count return "{0} objects updated".format(nb_mod) except pymongo.errors.PyMongoError as err: log.error('Updating object %s failed with error %s', objects, err) return err
Update an object into a collection http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' mongodb.update_one '{"_id": "my_minion"} {"bar": "BAR"}' mycollection <user> <password> <host> <port> <database>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mongodb.py#L465-L530
[ "def find(collection, query=None, user=None, password=None,\n host=None, port=None, database='admin', authdb=None):\n '''\n Find an object or list of objects in a collection\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' mongodb.find mycollection '[{\"foo\": \"FOO\", \"bar\": \"BAR\"}]' <user> <password> <host> <port> <database>\n\n '''\n conn = _connect(user, password, host, port, database, authdb)\n if not conn:\n return 'Failed to connect to mongo database'\n\n try:\n query = _to_dict(query)\n except Exception as err:\n return err\n\n try:\n log.info(\"Searching for %r in %s\", query, collection)\n mdb = pymongo.database.Database(conn, database)\n col = getattr(mdb, collection)\n ret = col.find(query)\n return list(ret)\n except pymongo.errors.PyMongoError as err:\n log.error(\"Searching objects failed with error: %s\", err)\n return err\n", "def _to_dict(objects):\n '''\n Potentially interprets a string as JSON for usage with mongo\n '''\n try:\n if isinstance(objects, six.string_types):\n objects = salt.utils.json.loads(objects)\n except ValueError as err:\n log.error(\"Could not parse objects: %s\", err)\n raise err\n\n return objects\n", "def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None):\n '''\n Returns a tuple of (user, host, port) with config, pillar, or default\n values assigned to missing values.\n '''\n if not user:\n user = __salt__['config.option']('mongodb.user')\n if not password:\n password = __salt__['config.option']('mongodb.password')\n if not host:\n host = __salt__['config.option']('mongodb.host')\n if not port:\n port = __salt__['config.option']('mongodb.port')\n if not authdb:\n authdb = database\n\n try:\n conn = pymongo.MongoClient(host=host, port=port)\n mdb = pymongo.database.Database(conn, database)\n if user and password:\n mdb.authenticate(user, password, source=authdb)\n except pymongo.errors.PyMongoError:\n log.error('Error connecting to database %s', database)\n return False\n\n 
return conn\n" ]
# -*- coding: utf-8 -*- ''' Module to provide MongoDB functionality to Salt :configuration: This module uses PyMongo, and accepts configuration details as parameters as well as configuration settings:: mongodb.host: 'localhost' mongodb.port: 27017 mongodb.user: '' mongodb.password: '' This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import re # Import salt libs import salt.utils.json from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import get_error_message as _get_error_message # Import third party libs from salt.ext import six try: import pymongo HAS_MONGODB = True except ImportError: HAS_MONGODB = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if pymongo is installed ''' if HAS_MONGODB: return 'mongodb' else: return (False, 'The mongodb execution module cannot be loaded: the pymongo library is not available.') def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Returns a tuple of (user, host, port) with config, pillar, or default values assigned to missing values. 
''' if not user: user = __salt__['config.option']('mongodb.user') if not password: password = __salt__['config.option']('mongodb.password') if not host: host = __salt__['config.option']('mongodb.host') if not port: port = __salt__['config.option']('mongodb.port') if not authdb: authdb = database try: conn = pymongo.MongoClient(host=host, port=port) mdb = pymongo.database.Database(conn, database) if user and password: mdb.authenticate(user, password, source=authdb) except pymongo.errors.PyMongoError: log.error('Error connecting to database %s', database) return False return conn def _to_dict(objects): ''' Potentially interprets a string as JSON for usage with mongo ''' try: if isinstance(objects, six.string_types): objects = salt.utils.json.loads(objects) except ValueError as err: log.error("Could not parse objects: %s", err) raise err return objects def db_list(user=None, password=None, host=None, port=None, authdb=None): ''' List all MongoDB databases CLI Example: .. code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing databases') return conn.database_names() except pymongo.errors.PyMongoError as err: log.error(err) return six.text_type(err) def db_exists(name, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a database exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.db_exists <name> <user> <password> <host> <port> ''' dbs = db_list(user, password, host, port, authdb=authdb) if isinstance(dbs, six.string_types): return False return name in dbs def db_remove(name, user=None, password=None, host=None, port=None, authdb=None): ''' Remove a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.db_remove <name> <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing database %s', name) conn.drop_database(name) except pymongo.errors.PyMongoError as err: log.error('Removing database %s failed with error: %s', name, err) return six.text_type(err) return True def _version(mdb): return mdb.command('buildInfo')['version'] def version(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get MongoDB instance version CLI Example: .. code-block:: bash salt '*' mongodb.version <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) try: mdb = pymongo.database.Database(conn, database) return _version(mdb) except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_find(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get single user from MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) mdb = pymongo.database.Database(conn, database) try: return mdb.command("usersInfo", name)["users"] except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return (False, six.text_type(err)) def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' List users of a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.user_list <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing users') mdb = pymongo.database.Database(conn, database) output = [] mongodb_version = _version(mdb) if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'): for user in mdb.command('usersInfo')['users']: output.append( {'user': user['user'], 'roles': user['roles']} ) else: for user in mdb.system.users.find(): output.append( {'user': user['user'], 'readOnly': user.get('readOnly', 'None')} ) return output except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_exists(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Checks if a user exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database> ''' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): return True return False def user_create(name, passwd, user=None, password=None, host=None, port=None, database='admin', authdb=None, roles=None): ''' Create a MongoDB user CLI Example: .. 
code-block:: bash salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' if not roles: roles = [] try: log.info('Creating user %s', name) mdb = pymongo.database.Database(conn, database) mdb.add_user(name, passwd, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_remove(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Remove a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing user %s', name) mdb = pymongo.database.Database(conn, database) mdb.remove_user(name) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_roles_exists(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): for role in roles: # if the role was provided in the shortened form, we convert it to a long form if not isinstance(role, dict): role = {'role': role, 'db': database} if role not in dict(user).get('roles', []): return False return True return False def user_grant_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Grant one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Granting roles %s to user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("grantRolesToUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Granting roles %s to user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def user_revoke_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Revoke one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_revoke_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_revoke_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Revoking roles %s from user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("revokeRolesFromUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Revoking roles %s from user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def insert(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Insert an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" try: objects = _to_dict(objects) except Exception as err: return err try: log.info("Inserting %r into %s.%s", objects, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.insert(objects) return ids except pymongo.errors.PyMongoError as err: log.error("Inserting objects %r failed with error %s", objects, err) return err def find(collection, query=None, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Find an object or list of objects in a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err def remove(collection, query=None, user=None, password=None, host=None, port=None, database='admin', w=1, authdb=None): ''' Remove an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.remove mycollection '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return _get_error_message(err) try: log.info("Removing %r from %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.remove(query, w=w) return "{0} objects removed".format(ret['n']) except pymongo.errors.PyMongoError as err: log.error("Removing objects failed with error: %s", _get_error_message(err)) return _get_error_message(err)
saltstack/salt
salt/modules/mongodb.py
find
python
def find(collection, query=None, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Find an object or list of objects in a collection CLI Example: .. code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err
Find an object or list of objects in a collection CLI Example: .. code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mongodb.py#L533-L562
[ "def _to_dict(objects):\n '''\n Potentially interprets a string as JSON for usage with mongo\n '''\n try:\n if isinstance(objects, six.string_types):\n objects = salt.utils.json.loads(objects)\n except ValueError as err:\n log.error(\"Could not parse objects: %s\", err)\n raise err\n\n return objects\n", "def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None):\n '''\n Returns a tuple of (user, host, port) with config, pillar, or default\n values assigned to missing values.\n '''\n if not user:\n user = __salt__['config.option']('mongodb.user')\n if not password:\n password = __salt__['config.option']('mongodb.password')\n if not host:\n host = __salt__['config.option']('mongodb.host')\n if not port:\n port = __salt__['config.option']('mongodb.port')\n if not authdb:\n authdb = database\n\n try:\n conn = pymongo.MongoClient(host=host, port=port)\n mdb = pymongo.database.Database(conn, database)\n if user and password:\n mdb.authenticate(user, password, source=authdb)\n except pymongo.errors.PyMongoError:\n log.error('Error connecting to database %s', database)\n return False\n\n return conn\n" ]
# -*- coding: utf-8 -*- ''' Module to provide MongoDB functionality to Salt :configuration: This module uses PyMongo, and accepts configuration details as parameters as well as configuration settings:: mongodb.host: 'localhost' mongodb.port: 27017 mongodb.user: '' mongodb.password: '' This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import re # Import salt libs import salt.utils.json from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import get_error_message as _get_error_message # Import third party libs from salt.ext import six try: import pymongo HAS_MONGODB = True except ImportError: HAS_MONGODB = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if pymongo is installed ''' if HAS_MONGODB: return 'mongodb' else: return (False, 'The mongodb execution module cannot be loaded: the pymongo library is not available.') def _connect(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Returns a tuple of (user, host, port) with config, pillar, or default values assigned to missing values. 
''' if not user: user = __salt__['config.option']('mongodb.user') if not password: password = __salt__['config.option']('mongodb.password') if not host: host = __salt__['config.option']('mongodb.host') if not port: port = __salt__['config.option']('mongodb.port') if not authdb: authdb = database try: conn = pymongo.MongoClient(host=host, port=port) mdb = pymongo.database.Database(conn, database) if user and password: mdb.authenticate(user, password, source=authdb) except pymongo.errors.PyMongoError: log.error('Error connecting to database %s', database) return False return conn def _to_dict(objects): ''' Potentially interprets a string as JSON for usage with mongo ''' try: if isinstance(objects, six.string_types): objects = salt.utils.json.loads(objects) except ValueError as err: log.error("Could not parse objects: %s", err) raise err return objects def db_list(user=None, password=None, host=None, port=None, authdb=None): ''' List all MongoDB databases CLI Example: .. code-block:: bash salt '*' mongodb.db_list <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing databases') return conn.database_names() except pymongo.errors.PyMongoError as err: log.error(err) return six.text_type(err) def db_exists(name, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a database exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.db_exists <name> <user> <password> <host> <port> ''' dbs = db_list(user, password, host, port, authdb=authdb) if isinstance(dbs, six.string_types): return False return name in dbs def db_remove(name, user=None, password=None, host=None, port=None, authdb=None): ''' Remove a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.db_remove <name> <user> <password> <host> <port> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing database %s', name) conn.drop_database(name) except pymongo.errors.PyMongoError as err: log.error('Removing database %s failed with error: %s', name, err) return six.text_type(err) return True def _version(mdb): return mdb.command('buildInfo')['version'] def version(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get MongoDB instance version CLI Example: .. code-block:: bash salt '*' mongodb.version <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) try: mdb = pymongo.database.Database(conn, database) return _version(mdb) except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_find(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Get single user from MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) log.error(err_msg) return (False, err_msg) mdb = pymongo.database.Database(conn, database) try: return mdb.command("usersInfo", name)["users"] except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return (False, six.text_type(err)) def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' List users of a MongoDB database CLI Example: .. 
code-block:: bash salt '*' mongodb.user_list <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: log.info('Listing users') mdb = pymongo.database.Database(conn, database) output = [] mongodb_version = _version(mdb) if _LooseVersion(mongodb_version) >= _LooseVersion('2.6'): for user in mdb.command('usersInfo')['users']: output.append( {'user': user['user'], 'roles': user['roles']} ) else: for user in mdb.system.users.find(): output.append( {'user': user['user'], 'readOnly': user.get('readOnly', 'None')} ) return output except pymongo.errors.PyMongoError as err: log.error('Listing users failed with error: %s', err) return six.text_type(err) def user_exists(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Checks if a user exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.user_exists <name> <user> <password> <host> <port> <database> ''' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): return True return False def user_create(name, passwd, user=None, password=None, host=None, port=None, database='admin', authdb=None, roles=None): ''' Create a MongoDB user CLI Example: .. 
code-block:: bash salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' if not roles: roles = [] try: log.info('Creating user %s', name) mdb = pymongo.database.Database(conn, database) mdb.add_user(name, passwd, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_remove(name, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Remove a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port) if not conn: return 'Failed to connect to mongo database' try: log.info('Removing user %s', name) mdb = pymongo.database.Database(conn, database) mdb.remove_user(name) except pymongo.errors.PyMongoError as err: log.error('Creating database %s failed with error: %s', name, err) return six.text_type(err) return True def user_roles_exists(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' users = user_list(user, password, host, port, database, authdb) if isinstance(users, six.string_types): return 'Failed to connect to mongo database' for user in users: if name == dict(user).get('user'): for role in roles: # if the role was provided in the shortened form, we convert it to a long form if not isinstance(role, dict): role = {'role': role, 'db': database} if role not in dict(user).get('roles', []): return False return True return False def user_grant_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Grant one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_grant_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_grant_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Granting roles %s to user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("grantRolesToUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Granting roles %s to user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def user_revoke_roles(name, roles, database, user=None, password=None, host=None, port=None, authdb=None): ''' Revoke one or many roles to a MongoDB user CLI Examples: .. code-block:: bash salt '*' mongodb.user_revoke_roles johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. 
code-block:: bash salt '*' mongodb.user_revoke_roles janedoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: return 'Failed to connect to mongo database' try: roles = _to_dict(roles) except Exception: return 'Roles provided in wrong format' try: log.info('Revoking roles %s from user %s', roles, name) mdb = pymongo.database.Database(conn, database) mdb.command("revokeRolesFromUser", name, roles=roles) except pymongo.errors.PyMongoError as err: log.error('Revoking roles %s from user %s failed with error: %s', roles, name, err) return six.text_type(err) return True def insert(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Insert an object or list of objects into a collection CLI Example: .. code-block:: bash salt '*' mongodb.insert '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" try: objects = _to_dict(objects) except Exception as err: return err try: log.info("Inserting %r into %s.%s", objects, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.insert(objects) return ids except pymongo.errors.PyMongoError as err: log.error("Inserting objects %r failed with error %s", objects, err) return err def update_one(objects, collection, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Update an object into a collection http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one .. versionadded:: 2016.11.0 CLI Example: .. 
code-block:: bash salt '*' mongodb.update_one '{"_id": "my_minion"} {"bar": "BAR"}' mycollection <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return "Failed to connect to mongo database" objects = six.text_type(objects) objs = re.split(r'}\s+{', objects) if len(objs) is not 2: return "Your request does not contain a valid " + \ "'{_\"id\": \"my_id\"} {\"my_doc\": \"my_val\"}'" objs[0] = objs[0] + '}' objs[1] = '{' + objs[1] document = [] for obj in objs: try: obj = _to_dict(obj) document.append(obj) except Exception as err: return err _id_field = document[0] _update_doc = document[1] # need a string to perform the test, so using objs[0] test_f = find(collection, objs[0], user, password, host, port, database, authdb) if not isinstance(test_f, list): return 'The find result is not well formatted. An error appears; cannot update.' elif not test_f: return 'Did not find any result. You should try an insert before.' elif len(test_f) > 1: return 'Too many results. Please try to be more specific.' else: try: log.info("Updating %r into %s.%s", _id_field, database, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ids = col.update_one(_id_field, {'$set': _update_doc}) nb_mod = ids.modified_count return "{0} objects updated".format(nb_mod) except pymongo.errors.PyMongoError as err: log.error('Updating object %s failed with error %s', objects, err) return err def remove(collection, query=None, user=None, password=None, host=None, port=None, database='admin', w=1, authdb=None): ''' Remove an object or list of objects into a collection CLI Example: .. 
code-block:: bash salt '*' mongodb.remove mycollection '[{"foo": "FOO", "bar": "BAR"}, {"foo": "BAZ", "bar": "BAM"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return _get_error_message(err) try: log.info("Removing %r from %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.remove(query, w=w) return "{0} objects removed".format(ret['n']) except pymongo.errors.PyMongoError as err: log.error("Removing objects failed with error: %s", _get_error_message(err)) return _get_error_message(err)
saltstack/salt
salt/states/infoblox_host_record.py
present
python
def present(name=None, data=None, ensure_data=True, **api_opts): ''' This will ensure that a host with the provided name exists. This will try to ensure that the state of the host matches the given data If the host is not found then one will be created. When trying to update a hostname ensure `name` is set to the hostname of the current record. You can give a new name in the `data.name`. Avoid race conditions, use func:nextavailableip: - func:nextavailableip:network/ZG54dfgsrDFEFfsfsLzA:10.0.0.0/8/default - func:nextavailableip:10.0.0.0/8 - func:nextavailableip:10.0.0.0/8,externalconfigure_for_dns - func:nextavailableip:10.0.0.3-10.0.0.10 State Example: .. code-block:: yaml # this would update `original_hostname.example.ca` to changed `data`. infoblox_host_record.present: - name: original_hostname.example.ca - data: {'namhostname.example.cae': 'hostname.example.ca', 'aliases': ['hostname.math.example.ca'], 'extattrs': [{'Business Contact': {'value': 'EXAMPLE@example.ca'}}], 'ipv4addrs': [{'configure_for_dhcp': True, 'ipv4addr': 'func:nextavailableip:129.97.139.0/24', 'mac': '00:50:56:84:6e:ae'}], 'ipv6addrs': [], } ''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} if data is None: data = {} if 'name' not in data: data.update({'name': name}) obj = __salt__['infoblox.get_host'](name=name, **api_opts) if obj is None: # perhaps the user updated the name obj = __salt__['infoblox.get_host'](name=data['name'], **api_opts) if obj: # warn user that the host name was updated and does not match ret['result'] = False ret['comment'] = 'please update the name: {0} to equal the updated data name {1}'.format(name, data['name']) return ret if obj: if not ensure_data: ret['result'] = True ret['comment'] = 'infoblox record already created (supplied fields not ensured to match)' return ret obj = __salt__['infoblox.get_host_advanced'](name=name, **api_opts) diff = __salt__['infoblox.diff_objects'](data, obj) if not diff: ret['result'] = True ret['comment'] = 
'supplied fields already updated (note: removing fields might not update)' return ret if diff: ret['changes'] = {'diff': diff} if __opts__['test']: ret['result'] = None ret['comment'] = 'would attempt to update infoblox record' return ret # replace func:nextavailableip with current ip address if in range # get list of ipaddresses that are defined. obj_addrs = [] if 'ipv4addrs' in obj: for addr in obj['ipv4addrs']: if 'ipv4addr' in addr: obj_addrs.append(addr['ipv4addr']) if 'ipv6addrs' in obj: for addr in obj['ipv6addrs']: if 'ipv6addr' in addr: obj_addrs.append(addr['ipv6addr']) # replace func:nextavailableip: if an ip address is already found in that range. if 'ipv4addrs' in data: for addr in data['ipv4addrs']: if 'ipv4addr' in addr: addrobj = addr['ipv4addr'] if addrobj.startswith('func:nextavailableip:'): found_matches = 0 for ip in obj_addrs: if __salt__['infoblox.is_ipaddr_in_ipfunc_range'](ip, addrobj): addr['ipv4addr'] = ip found_matches += 1 if found_matches > 1: ret['comment'] = 'infoblox record cant updated because ipaddress {0} matches multiple func:nextavailableip'.format(ip) ret['result'] = False return ret new_obj = __salt__['infoblox.update_object'](obj['_ref'], data=data, **api_opts) ret['result'] = True ret['comment'] = 'infoblox record fields updated (note: removing fields might not update)' #ret['changes'] = {'diff': diff } return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'would attempt to create infoblox record {0}'.format(name) return ret new_obj_ref = __salt__['infoblox.create_host'](data=data, **api_opts) new_obj = __salt__['infoblox.get_host'](name=name, **api_opts) ret['result'] = True ret['comment'] = 'infoblox record created' ret['changes'] = {'old': 'None', 'new': {'_ref': new_obj_ref, 'data': new_obj}} return ret
This will ensure that a host with the provided name exists. This will try to ensure that the state of the host matches the given data If the host is not found then one will be created. When trying to update a hostname ensure `name` is set to the hostname of the current record. You can give a new name in the `data.name`. Avoid race conditions, use func:nextavailableip: - func:nextavailableip:network/ZG54dfgsrDFEFfsfsLzA:10.0.0.0/8/default - func:nextavailableip:10.0.0.0/8 - func:nextavailableip:10.0.0.0/8,externalconfigure_for_dns - func:nextavailableip:10.0.0.3-10.0.0.10 State Example: .. code-block:: yaml # this would update `original_hostname.example.ca` to changed `data`. infoblox_host_record.present: - name: original_hostname.example.ca - data: {'namhostname.example.cae': 'hostname.example.ca', 'aliases': ['hostname.math.example.ca'], 'extattrs': [{'Business Contact': {'value': 'EXAMPLE@example.ca'}}], 'ipv4addrs': [{'configure_for_dhcp': True, 'ipv4addr': 'func:nextavailableip:129.97.139.0/24', 'mac': '00:50:56:84:6e:ae'}], 'ipv6addrs': [], }
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/infoblox_host_record.py#L17-L128
null
# -*- coding: utf-8 -*- ''' Infoblox host record managment. functions accept api_opts: api_verifyssl: verify SSL [default to True or pillar value] api_url: server to connect to [default to pillar value] api_username: [default to pillar value] api_password: [default to pillar value] ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals def absent(name=None, ipv4addr=None, mac=None, **api_opts): ''' Ensure the host with the given Name ipv4addr or mac is removed. State example: .. code-block:: yaml infoblox_host_record.absent: - name: hostname.of.record.to.remove infoblox_host_record.absent: - name: - ipv4addr: 192.168.0.1 infoblox_host_record.absent: - name: - mac: 12:02:12:31:23:43 ''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} obj = __salt__['infoblox.get_host'](name=name, ipv4addr=ipv4addr, mac=mac, **api_opts) if not obj: ret['result'] = True ret['comment'] = 'infoblox already removed' return ret if __opts__['test']: ret['result'] = None ret['changes'] = {'old': obj, 'new': 'absent'} return ret if __salt__['infoblox.delete_host'](name=name, mac=mac, **api_opts): ret['result'] = True ret['changes'] = {'old': obj, 'new': 'absent'} return ret
saltstack/salt
salt/proxy/marathon.py
init
python
def init(opts): ''' Perform any needed setup. ''' if CONFIG_BASE_URL in opts['proxy']: CONFIG[CONFIG_BASE_URL] = opts['proxy'][CONFIG_BASE_URL] else: log.error('missing proxy property %s', CONFIG_BASE_URL) log.debug('CONFIG: %s', CONFIG)
Perform any needed setup.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/marathon.py#L43-L51
null
# -*- coding: utf-8 -*- ''' Marathon ======== Proxy minion for managing a Marathon cluster. Dependencies ------------ - :mod:`marathon execution module (salt.modules.marathon) <salt.modules.marathon>` Pillar ------ The marathon proxy configuration requires a 'base_url' property that points to the marathon endpoint: .. code-block:: yaml proxy: proxytype: marathon base_url: http://my-marathon-master.mydomain.com:8080 .. versionadded:: 2015.8.2 ''' from __future__ import absolute_import, print_function, unicode_literals import logging import salt.utils.http __proxyenabled__ = ['marathon'] CONFIG = {} CONFIG_BASE_URL = 'base_url' log = logging.getLogger(__file__) def __virtual__(): return True def ping(): ''' Is the marathon api responding? ''' try: response = salt.utils.http.query( "{0}/ping".format(CONFIG[CONFIG_BASE_URL]), decode_type='plain', decode=True, ) log.debug( 'marathon.info returned successfully: %s', response, ) if 'text' in response and response['text'].strip() == 'pong': return True except Exception as ex: log.error( 'error calling marathon.info with base_url %s: %s', CONFIG[CONFIG_BASE_URL], ex, ) return False def shutdown(opts): ''' For this proxy shutdown is a no-op ''' log.debug('marathon proxy shutdown() called...')
saltstack/salt
salt/proxy/marathon.py
ping
python
def ping(): ''' Is the marathon api responding? ''' try: response = salt.utils.http.query( "{0}/ping".format(CONFIG[CONFIG_BASE_URL]), decode_type='plain', decode=True, ) log.debug( 'marathon.info returned successfully: %s', response, ) if 'text' in response and response['text'].strip() == 'pong': return True except Exception as ex: log.error( 'error calling marathon.info with base_url %s: %s', CONFIG[CONFIG_BASE_URL], ex, ) return False
Is the marathon api responding?
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/marathon.py#L54-L76
null
# -*- coding: utf-8 -*- ''' Marathon ======== Proxy minion for managing a Marathon cluster. Dependencies ------------ - :mod:`marathon execution module (salt.modules.marathon) <salt.modules.marathon>` Pillar ------ The marathon proxy configuration requires a 'base_url' property that points to the marathon endpoint: .. code-block:: yaml proxy: proxytype: marathon base_url: http://my-marathon-master.mydomain.com:8080 .. versionadded:: 2015.8.2 ''' from __future__ import absolute_import, print_function, unicode_literals import logging import salt.utils.http __proxyenabled__ = ['marathon'] CONFIG = {} CONFIG_BASE_URL = 'base_url' log = logging.getLogger(__file__) def __virtual__(): return True def init(opts): ''' Perform any needed setup. ''' if CONFIG_BASE_URL in opts['proxy']: CONFIG[CONFIG_BASE_URL] = opts['proxy'][CONFIG_BASE_URL] else: log.error('missing proxy property %s', CONFIG_BASE_URL) log.debug('CONFIG: %s', CONFIG) def shutdown(opts): ''' For this proxy shutdown is a no-op ''' log.debug('marathon proxy shutdown() called...')
saltstack/salt
salt/modules/glance.py
_auth
python
def _auth(profile=None, api_version=2, **connection_args): ''' Set up glance credentials, returns `glanceclient.client.Client`. Optional parameter "api_version" defaults to 2. Only intended to be used within glance-enabled modules ''' __utils__['versions.warn_until']( 'Neon', ( 'The glance module has been deprecated and will be removed in {version}. ' 'Please update to using the glanceng module' ), ) if profile: prefix = profile + ":keystone." else: prefix = "keystone." def get(key, default=None): ''' Checks connection_args, then salt-minion config, falls back to specified default value. ''' return connection_args.get('connection_' + key, __salt__['config.get'](prefix + key, default)) user = get('user', 'admin') password = get('password', None) tenant = get('tenant', 'admin') tenant_id = get('tenant_id') auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0') insecure = get('insecure', False) admin_token = get('token') region = get('region') ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/') g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile) # The trailing 'v2' causes URLs like thise one: # http://127.0.0.1:9292/v2/v1/images g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl']) if admin_token and api_version != 1 and not password: # If we had a password we could just # ignore the admin-token and move on... 
raise SaltInvocationError('Only can use keystone admin token ' + 'with Glance API v1') elif password: # Can't use the admin-token anyway kwargs = {'username': user, 'password': password, 'tenant_id': tenant_id, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url, 'region_name': region, 'tenant_name': tenant} # 'insecure' keyword not supported by all v2.0 keystone clients # this ensures it's only passed in when defined if insecure: kwargs['insecure'] = True elif api_version == 1 and admin_token: kwargs = {'token': admin_token, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url} else: raise SaltInvocationError('No credentials to authenticate with.') if HAS_KEYSTONE: log.debug('Calling keystoneclient.v2_0.client.Client(%s, **%s)', ks_endpoint, kwargs) keystone = kstone.Client(**kwargs) kwargs['token'] = keystone.get_token(keystone.session) # This doesn't realy prevent the password to show up # in the minion log as keystoneclient.session is # logging it anyway when in debug-mode kwargs.pop('password') log.debug('Calling glanceclient.client.Client(%s, %s, **%s)', api_version, g_endpoint_url, kwargs) # may raise exc.HTTPUnauthorized, exc.HTTPNotFound # but we deal with those elsewhere return client.Client(api_version, g_endpoint_url, **kwargs) else: raise NotImplementedError( "Can't retrieve a auth_token without keystone")
Set up glance credentials, returns `glanceclient.client.Client`. Optional parameter "api_version" defaults to 2. Only intended to be used within glance-enabled modules
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/glance.py#L98-L182
[ "def get(key, default=None):\n '''\n Checks connection_args, then salt-minion config,\n falls back to specified default value.\n '''\n return connection_args.get('connection_' + key,\n __salt__['config.get'](prefix + key, default))\n" ]
# -*- coding: utf-8 -*- ''' Module for handling openstack glance calls. :optdepends: - glanceclient Python adapter :configuration: This module is not usable until the following are specified either in a pillar or in the minion's config file:: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.insecure: False #(optional) keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' If configuration for multiple openstack accounts is required, they can be set up as different configuration profiles: For example:: openstack1: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' openstack2: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.2:5000/v2.0/' With this configuration in place, any of the glance functions can make use of a configuration profile by declaring it explicitly. For example:: salt '*' glance.image_list profile=openstack1 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import re # Import salt libs from salt.exceptions import ( SaltInvocationError ) from salt.version import ( __version__, SaltStackVersion ) from salt.ext import six # is there not SaltStackVersion.current() to get # the version of the salt running this code?? 
_version_ary = __version__.split('.') CUR_VER = SaltStackVersion(_version_ary[0], _version_ary[1]) BORON = SaltStackVersion.from_name('Boron') # pylint: disable=import-error HAS_GLANCE = False try: from glanceclient import client from glanceclient import exc HAS_GLANCE = True except ImportError: pass # Workaround, as the Glance API v2 requires you to # already have a keystone session token HAS_KEYSTONE = False try: from keystoneclient.v2_0 import client as kstone #import keystoneclient.apiclient.exceptions as kstone_exc HAS_KEYSTONE = True except ImportError: pass import logging logging.basicConfig(level=logging.DEBUG) log = logging.getLogger(__name__) import pprint def __virtual__(): ''' Only load this module if glance is installed on this minion. ''' if HAS_GLANCE: return 'glance' return (False, 'The glance execution module cannot be loaded: the glanceclient python library is not available.') __opts__ = {} def _add_image(collection, image): ''' Add image to given dictionary ''' image_prep = { 'id': image.id, 'name': image.name, 'created_at': image.created_at, 'file': image.file, 'min_disk': image.min_disk, 'min_ram': image.min_ram, 'owner': image.owner, 'protected': image.protected, 'status': image.status, 'tags': image.tags, 'updated_at': image.updated_at, 'visibility': image.visibility, } # Those cause AttributeErrors in Icehouse' glanceclient for attr in ['container_format', 'disk_format', 'size']: if attr in image: image_prep[attr] = image[attr] if type(collection) is dict: collection[image.name] = image_prep elif type(collection) is list: collection.append(image_prep) else: msg = '"collection" is {0}'.format(type(collection)) +\ 'instead of dict or list.' log.error(msg) raise TypeError(msg) return collection def image_create(name, location=None, profile=None, visibility=None, container_format='bare', disk_format='raw', protected=None,): ''' Create an image (glance image-create) CLI Example, old format: .. 
code-block:: bash salt '*' glance.image_create name=f16-jeos \\ disk_format=qcow2 container_format=ovf CLI Example, new format resembling Glance API v2: .. code-block:: bash salt '*' glance.image_create name=f16-jeos visibility=public \\ disk_format=qcow2 container_format=ovf The parameter 'visibility' defaults to 'public' if not specified. ''' kwargs = {} # valid options for "visibility": v_list = ['public', 'private'] # valid options for "container_format": cf_list = ['ami', 'ari', 'aki', 'bare', 'ovf'] # valid options for "disk_format": df_list = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'] kwargs['copy_from'] = location if visibility is not None: if visibility not in v_list: raise SaltInvocationError('"visibility" needs to be one ' + 'of the following: {0}'.format(', '.join(v_list))) elif visibility == 'public': kwargs['is_public'] = True else: kwargs['is_public'] = False else: kwargs['is_public'] = True if container_format not in cf_list: raise SaltInvocationError('"container_format" needs to be ' + 'one of the following: {0}'.format(', '.join(cf_list))) else: kwargs['container_format'] = container_format if disk_format not in df_list: raise SaltInvocationError('"disk_format" needs to be one ' + 'of the following: {0}'.format(', '.join(df_list))) else: kwargs['disk_format'] = disk_format if protected is not None: kwargs['protected'] = protected # Icehouse's glanceclient doesn't have add_location() and # glanceclient.v2 doesn't implement Client.images.create() # in a usable fashion. Thus we have to use v1 for now. g_client = _auth(profile, api_version=1) image = g_client.images.create(name=name, **kwargs) return image_show(image.id, profile=profile) def image_delete(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Delete an image (glance image-delete) CLI Examples: .. 
code-block:: bash salt '*' glance.image_delete c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete name=f16-jeos ''' g_client = _auth(profile) image = {'id': False, 'name': None} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image id ' 'for name {0}'.format(name) } elif not name: name = image['name'] try: g_client.images.delete(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } except exc.HTTPForbidden as forbidden: log.error(six.text_type(forbidden)) return { 'result': False, 'comment': six.text_type(forbidden) } return { 'result': True, 'comment': 'Deleted image \'{0}\' ({1}).'.format(name, id), } def image_show(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Return details about a specific image (glance image-show) CLI Example: .. code-block:: bash salt '*' glance.image_show ''' g_client = _auth(profile) ret = {} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image ID ' 'for name \'{0}\''.format(name) } try: image = g_client.images.get(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } log.debug( 'Properties of image %s:\n%s', image.name, pprint.PrettyPrinter(indent=4).pformat(image) ) schema = image_schema(profile=profile) if len(schema.keys()) == 1: schema = schema['image'] for key in schema: if key in image: ret[key] = image[key] return ret def image_list(id=None, profile=None, name=None): # pylint: disable=C0103 ''' Return a list of available images (glance image-list) CLI Example: .. 
code-block:: bash salt '*' glance.image_list ''' g_client = _auth(profile) ret = [] for image in g_client.images.list(): if id is None and name is None: _add_image(ret, image) else: if id is not None and id == image.id: _add_image(ret, image) return ret if name == image.name: if name in ret and CUR_VER < BORON: # Not really worth an exception return { 'result': False, 'comment': 'More than one image with ' 'name "{0}"'.format(name) } _add_image(ret, image) log.debug('Returning images: %s', ret) return ret def image_schema(profile=None): ''' Returns names and descriptions of the schema "image"'s properties for this profile's instance of glance CLI Example: .. code-block:: bash salt '*' glance.image_schema ''' return schema_get('image', profile) def image_update(id=None, name=None, profile=None, **kwargs): # pylint: disable=C0103 ''' Update properties of given image. Known to work for: - min_ram (in MB) - protected (bool) - visibility ('public' or 'private') CLI Example: .. code-block:: bash salt '*' glance.image_update id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_update name=f16-jeos ''' if id: image = image_show(id=id, profile=profile) if 'result' in image and not image['result']: return image elif len(image) == 1: image = image.values()[0] elif name: img_list = image_list(name=name, profile=profile) if img_list is dict and 'result' in img_list: return img_list elif not img_list: return { 'result': False, 'comment': 'No image with name \'{0}\' ' 'found.'.format(name) } elif len(img_list) == 1: try: image = img_list[0] except KeyError: image = img_list[name] else: raise SaltInvocationError log.debug('Found image:\n%s', image) to_update = {} for key, value in kwargs.items(): if key.startswith('_'): continue if key not in image or image[key] != value: log.debug('add <%s=%s> to to_update', key, value) to_update[key] = value g_client = _auth(profile) updated = g_client.images.update(image['id'], **to_update) return updated def schema_get(name, 
profile=None): ''' Known valid names of schemas are: - image - images - member - members CLI Example: .. code-block:: bash salt '*' glance.schema_get name=f16-jeos ''' g_client = _auth(profile) schema_props = {} for prop in g_client.schemas.get(name).properties: schema_props[prop.name] = prop.description log.debug( 'Properties of schema %s:\n%s', name, pprint.PrettyPrinter(indent=4).pformat(schema_props) ) return {name: schema_props} def _item_list(profile=None): ''' Template for writing list functions Return a list of available items (glance items-list) CLI Example: .. code-block:: bash salt '*' glance.item_list ''' g_client = _auth(profile) ret = [] for item in g_client.items.list(): ret.append(item.__dict__) #ret[item.name] = { # 'name': item.name, # } return ret # The following is a list of functions that need to be incorporated in the # glance module. This list should be updated as functions are added. # image-download Download a specific image. # member-create Share a specific image with a tenant. # member-delete Remove a shared image from a tenant. # member-list Describe sharing permissions by image or tenant.
saltstack/salt
salt/modules/glance.py
_add_image
python
def _add_image(collection, image): ''' Add image to given dictionary ''' image_prep = { 'id': image.id, 'name': image.name, 'created_at': image.created_at, 'file': image.file, 'min_disk': image.min_disk, 'min_ram': image.min_ram, 'owner': image.owner, 'protected': image.protected, 'status': image.status, 'tags': image.tags, 'updated_at': image.updated_at, 'visibility': image.visibility, } # Those cause AttributeErrors in Icehouse' glanceclient for attr in ['container_format', 'disk_format', 'size']: if attr in image: image_prep[attr] = image[attr] if type(collection) is dict: collection[image.name] = image_prep elif type(collection) is list: collection.append(image_prep) else: msg = '"collection" is {0}'.format(type(collection)) +\ 'instead of dict or list.' log.error(msg) raise TypeError(msg) return collection
Add image to given dictionary
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/glance.py#L185-L216
null
# -*- coding: utf-8 -*- ''' Module for handling openstack glance calls. :optdepends: - glanceclient Python adapter :configuration: This module is not usable until the following are specified either in a pillar or in the minion's config file:: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.insecure: False #(optional) keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' If configuration for multiple openstack accounts is required, they can be set up as different configuration profiles: For example:: openstack1: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' openstack2: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.2:5000/v2.0/' With this configuration in place, any of the glance functions can make use of a configuration profile by declaring it explicitly. For example:: salt '*' glance.image_list profile=openstack1 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import re # Import salt libs from salt.exceptions import ( SaltInvocationError ) from salt.version import ( __version__, SaltStackVersion ) from salt.ext import six # is there not SaltStackVersion.current() to get # the version of the salt running this code?? 
_version_ary = __version__.split('.') CUR_VER = SaltStackVersion(_version_ary[0], _version_ary[1]) BORON = SaltStackVersion.from_name('Boron') # pylint: disable=import-error HAS_GLANCE = False try: from glanceclient import client from glanceclient import exc HAS_GLANCE = True except ImportError: pass # Workaround, as the Glance API v2 requires you to # already have a keystone session token HAS_KEYSTONE = False try: from keystoneclient.v2_0 import client as kstone #import keystoneclient.apiclient.exceptions as kstone_exc HAS_KEYSTONE = True except ImportError: pass import logging logging.basicConfig(level=logging.DEBUG) log = logging.getLogger(__name__) import pprint def __virtual__(): ''' Only load this module if glance is installed on this minion. ''' if HAS_GLANCE: return 'glance' return (False, 'The glance execution module cannot be loaded: the glanceclient python library is not available.') __opts__ = {} def _auth(profile=None, api_version=2, **connection_args): ''' Set up glance credentials, returns `glanceclient.client.Client`. Optional parameter "api_version" defaults to 2. Only intended to be used within glance-enabled modules ''' __utils__['versions.warn_until']( 'Neon', ( 'The glance module has been deprecated and will be removed in {version}. ' 'Please update to using the glanceng module' ), ) if profile: prefix = profile + ":keystone." else: prefix = "keystone." def get(key, default=None): ''' Checks connection_args, then salt-minion config, falls back to specified default value. 
''' return connection_args.get('connection_' + key, __salt__['config.get'](prefix + key, default)) user = get('user', 'admin') password = get('password', None) tenant = get('tenant', 'admin') tenant_id = get('tenant_id') auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0') insecure = get('insecure', False) admin_token = get('token') region = get('region') ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/') g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile) # The trailing 'v2' causes URLs like thise one: # http://127.0.0.1:9292/v2/v1/images g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl']) if admin_token and api_version != 1 and not password: # If we had a password we could just # ignore the admin-token and move on... raise SaltInvocationError('Only can use keystone admin token ' + 'with Glance API v1') elif password: # Can't use the admin-token anyway kwargs = {'username': user, 'password': password, 'tenant_id': tenant_id, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url, 'region_name': region, 'tenant_name': tenant} # 'insecure' keyword not supported by all v2.0 keystone clients # this ensures it's only passed in when defined if insecure: kwargs['insecure'] = True elif api_version == 1 and admin_token: kwargs = {'token': admin_token, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url} else: raise SaltInvocationError('No credentials to authenticate with.') if HAS_KEYSTONE: log.debug('Calling keystoneclient.v2_0.client.Client(%s, **%s)', ks_endpoint, kwargs) keystone = kstone.Client(**kwargs) kwargs['token'] = keystone.get_token(keystone.session) # This doesn't realy prevent the password to show up # in the minion log as keystoneclient.session is # logging it anyway when in debug-mode kwargs.pop('password') log.debug('Calling glanceclient.client.Client(%s, %s, **%s)', api_version, g_endpoint_url, kwargs) # may raise exc.HTTPUnauthorized, exc.HTTPNotFound # but we deal with those elsewhere return 
client.Client(api_version, g_endpoint_url, **kwargs) else: raise NotImplementedError( "Can't retrieve a auth_token without keystone") def image_create(name, location=None, profile=None, visibility=None, container_format='bare', disk_format='raw', protected=None,): ''' Create an image (glance image-create) CLI Example, old format: .. code-block:: bash salt '*' glance.image_create name=f16-jeos \\ disk_format=qcow2 container_format=ovf CLI Example, new format resembling Glance API v2: .. code-block:: bash salt '*' glance.image_create name=f16-jeos visibility=public \\ disk_format=qcow2 container_format=ovf The parameter 'visibility' defaults to 'public' if not specified. ''' kwargs = {} # valid options for "visibility": v_list = ['public', 'private'] # valid options for "container_format": cf_list = ['ami', 'ari', 'aki', 'bare', 'ovf'] # valid options for "disk_format": df_list = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'] kwargs['copy_from'] = location if visibility is not None: if visibility not in v_list: raise SaltInvocationError('"visibility" needs to be one ' + 'of the following: {0}'.format(', '.join(v_list))) elif visibility == 'public': kwargs['is_public'] = True else: kwargs['is_public'] = False else: kwargs['is_public'] = True if container_format not in cf_list: raise SaltInvocationError('"container_format" needs to be ' + 'one of the following: {0}'.format(', '.join(cf_list))) else: kwargs['container_format'] = container_format if disk_format not in df_list: raise SaltInvocationError('"disk_format" needs to be one ' + 'of the following: {0}'.format(', '.join(df_list))) else: kwargs['disk_format'] = disk_format if protected is not None: kwargs['protected'] = protected # Icehouse's glanceclient doesn't have add_location() and # glanceclient.v2 doesn't implement Client.images.create() # in a usable fashion. Thus we have to use v1 for now. 
g_client = _auth(profile, api_version=1) image = g_client.images.create(name=name, **kwargs) return image_show(image.id, profile=profile) def image_delete(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Delete an image (glance image-delete) CLI Examples: .. code-block:: bash salt '*' glance.image_delete c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete name=f16-jeos ''' g_client = _auth(profile) image = {'id': False, 'name': None} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image id ' 'for name {0}'.format(name) } elif not name: name = image['name'] try: g_client.images.delete(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } except exc.HTTPForbidden as forbidden: log.error(six.text_type(forbidden)) return { 'result': False, 'comment': six.text_type(forbidden) } return { 'result': True, 'comment': 'Deleted image \'{0}\' ({1}).'.format(name, id), } def image_show(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Return details about a specific image (glance image-show) CLI Example: .. 
code-block:: bash salt '*' glance.image_show ''' g_client = _auth(profile) ret = {} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image ID ' 'for name \'{0}\''.format(name) } try: image = g_client.images.get(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } log.debug( 'Properties of image %s:\n%s', image.name, pprint.PrettyPrinter(indent=4).pformat(image) ) schema = image_schema(profile=profile) if len(schema.keys()) == 1: schema = schema['image'] for key in schema: if key in image: ret[key] = image[key] return ret def image_list(id=None, profile=None, name=None): # pylint: disable=C0103 ''' Return a list of available images (glance image-list) CLI Example: .. code-block:: bash salt '*' glance.image_list ''' g_client = _auth(profile) ret = [] for image in g_client.images.list(): if id is None and name is None: _add_image(ret, image) else: if id is not None and id == image.id: _add_image(ret, image) return ret if name == image.name: if name in ret and CUR_VER < BORON: # Not really worth an exception return { 'result': False, 'comment': 'More than one image with ' 'name "{0}"'.format(name) } _add_image(ret, image) log.debug('Returning images: %s', ret) return ret def image_schema(profile=None): ''' Returns names and descriptions of the schema "image"'s properties for this profile's instance of glance CLI Example: .. code-block:: bash salt '*' glance.image_schema ''' return schema_get('image', profile) def image_update(id=None, name=None, profile=None, **kwargs): # pylint: disable=C0103 ''' Update properties of given image. Known to work for: - min_ram (in MB) - protected (bool) - visibility ('public' or 'private') CLI Example: .. 
code-block:: bash salt '*' glance.image_update id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_update name=f16-jeos ''' if id: image = image_show(id=id, profile=profile) if 'result' in image and not image['result']: return image elif len(image) == 1: image = image.values()[0] elif name: img_list = image_list(name=name, profile=profile) if img_list is dict and 'result' in img_list: return img_list elif not img_list: return { 'result': False, 'comment': 'No image with name \'{0}\' ' 'found.'.format(name) } elif len(img_list) == 1: try: image = img_list[0] except KeyError: image = img_list[name] else: raise SaltInvocationError log.debug('Found image:\n%s', image) to_update = {} for key, value in kwargs.items(): if key.startswith('_'): continue if key not in image or image[key] != value: log.debug('add <%s=%s> to to_update', key, value) to_update[key] = value g_client = _auth(profile) updated = g_client.images.update(image['id'], **to_update) return updated def schema_get(name, profile=None): ''' Known valid names of schemas are: - image - images - member - members CLI Example: .. code-block:: bash salt '*' glance.schema_get name=f16-jeos ''' g_client = _auth(profile) schema_props = {} for prop in g_client.schemas.get(name).properties: schema_props[prop.name] = prop.description log.debug( 'Properties of schema %s:\n%s', name, pprint.PrettyPrinter(indent=4).pformat(schema_props) ) return {name: schema_props} def _item_list(profile=None): ''' Template for writing list functions Return a list of available items (glance items-list) CLI Example: .. code-block:: bash salt '*' glance.item_list ''' g_client = _auth(profile) ret = [] for item in g_client.items.list(): ret.append(item.__dict__) #ret[item.name] = { # 'name': item.name, # } return ret # The following is a list of functions that need to be incorporated in the # glance module. This list should be updated as functions are added. # image-download Download a specific image. 
# member-create Share a specific image with a tenant. # member-delete Remove a shared image from a tenant. # member-list Describe sharing permissions by image or tenant.
saltstack/salt
salt/modules/glance.py
image_create
python
def image_create(name, location=None, profile=None, visibility=None, container_format='bare', disk_format='raw', protected=None,): ''' Create an image (glance image-create) CLI Example, old format: .. code-block:: bash salt '*' glance.image_create name=f16-jeos \\ disk_format=qcow2 container_format=ovf CLI Example, new format resembling Glance API v2: .. code-block:: bash salt '*' glance.image_create name=f16-jeos visibility=public \\ disk_format=qcow2 container_format=ovf The parameter 'visibility' defaults to 'public' if not specified. ''' kwargs = {} # valid options for "visibility": v_list = ['public', 'private'] # valid options for "container_format": cf_list = ['ami', 'ari', 'aki', 'bare', 'ovf'] # valid options for "disk_format": df_list = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'] kwargs['copy_from'] = location if visibility is not None: if visibility not in v_list: raise SaltInvocationError('"visibility" needs to be one ' + 'of the following: {0}'.format(', '.join(v_list))) elif visibility == 'public': kwargs['is_public'] = True else: kwargs['is_public'] = False else: kwargs['is_public'] = True if container_format not in cf_list: raise SaltInvocationError('"container_format" needs to be ' + 'one of the following: {0}'.format(', '.join(cf_list))) else: kwargs['container_format'] = container_format if disk_format not in df_list: raise SaltInvocationError('"disk_format" needs to be one ' + 'of the following: {0}'.format(', '.join(df_list))) else: kwargs['disk_format'] = disk_format if protected is not None: kwargs['protected'] = protected # Icehouse's glanceclient doesn't have add_location() and # glanceclient.v2 doesn't implement Client.images.create() # in a usable fashion. Thus we have to use v1 for now. g_client = _auth(profile, api_version=1) image = g_client.images.create(name=name, **kwargs) return image_show(image.id, profile=profile)
Create an image (glance image-create) CLI Example, old format: .. code-block:: bash salt '*' glance.image_create name=f16-jeos \\ disk_format=qcow2 container_format=ovf CLI Example, new format resembling Glance API v2: .. code-block:: bash salt '*' glance.image_create name=f16-jeos visibility=public \\ disk_format=qcow2 container_format=ovf The parameter 'visibility' defaults to 'public' if not specified.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/glance.py#L219-L282
[ "def _auth(profile=None, api_version=2, **connection_args):\n '''\n Set up glance credentials, returns\n `glanceclient.client.Client`. Optional parameter\n \"api_version\" defaults to 2.\n\n Only intended to be used within glance-enabled modules\n '''\n __utils__['versions.warn_until'](\n 'Neon',\n (\n 'The glance module has been deprecated and will be removed in {version}. '\n 'Please update to using the glanceng module'\n ),\n )\n\n if profile:\n prefix = profile + \":keystone.\"\n else:\n prefix = \"keystone.\"\n\n def get(key, default=None):\n '''\n Checks connection_args, then salt-minion config,\n falls back to specified default value.\n '''\n return connection_args.get('connection_' + key,\n __salt__['config.get'](prefix + key, default))\n\n user = get('user', 'admin')\n password = get('password', None)\n tenant = get('tenant', 'admin')\n tenant_id = get('tenant_id')\n auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0')\n insecure = get('insecure', False)\n admin_token = get('token')\n region = get('region')\n ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/')\n g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile)\n # The trailing 'v2' causes URLs like thise one:\n # http://127.0.0.1:9292/v2/v1/images\n g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl'])\n\n if admin_token and api_version != 1 and not password:\n # If we had a password we could just\n # ignore the admin-token and move on...\n raise SaltInvocationError('Only can use keystone admin token ' +\n 'with Glance API v1')\n elif password:\n # Can't use the admin-token anyway\n kwargs = {'username': user,\n 'password': password,\n 'tenant_id': tenant_id,\n 'auth_url': auth_url,\n 'endpoint_url': g_endpoint_url,\n 'region_name': region,\n 'tenant_name': tenant}\n # 'insecure' keyword not supported by all v2.0 keystone clients\n # this ensures it's only passed in when defined\n if insecure:\n kwargs['insecure'] = True\n elif api_version == 1 and admin_token:\n 
kwargs = {'token': admin_token,\n 'auth_url': auth_url,\n 'endpoint_url': g_endpoint_url}\n else:\n raise SaltInvocationError('No credentials to authenticate with.')\n\n if HAS_KEYSTONE:\n log.debug('Calling keystoneclient.v2_0.client.Client(%s, **%s)',\n ks_endpoint, kwargs)\n keystone = kstone.Client(**kwargs)\n kwargs['token'] = keystone.get_token(keystone.session)\n # This doesn't realy prevent the password to show up\n # in the minion log as keystoneclient.session is\n # logging it anyway when in debug-mode\n kwargs.pop('password')\n log.debug('Calling glanceclient.client.Client(%s, %s, **%s)',\n api_version, g_endpoint_url, kwargs)\n # may raise exc.HTTPUnauthorized, exc.HTTPNotFound\n # but we deal with those elsewhere\n return client.Client(api_version, g_endpoint_url, **kwargs)\n else:\n raise NotImplementedError(\n \"Can't retrieve a auth_token without keystone\")\n", "def image_show(id=None, name=None, profile=None): # pylint: disable=C0103\n '''\n Return details about a specific image (glance image-show)\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' glance.image_show\n '''\n g_client = _auth(profile)\n ret = {}\n if name:\n for image in g_client.images.list():\n if image.name == name:\n id = image.id # pylint: disable=C0103\n continue\n if not id:\n return {\n 'result': False,\n 'comment':\n 'Unable to resolve image ID '\n 'for name \\'{0}\\''.format(name)\n }\n try:\n image = g_client.images.get(id)\n except exc.HTTPNotFound:\n return {\n 'result': False,\n 'comment': 'No image with ID {0}'.format(id)\n }\n log.debug(\n 'Properties of image %s:\\n%s',\n image.name, pprint.PrettyPrinter(indent=4).pformat(image)\n )\n\n schema = image_schema(profile=profile)\n if len(schema.keys()) == 1:\n schema = schema['image']\n for key in schema:\n if key in image:\n ret[key] = image[key]\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Module for handling openstack glance calls. :optdepends: - glanceclient Python adapter :configuration: This module is not usable until the following are specified either in a pillar or in the minion's config file:: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.insecure: False #(optional) keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' If configuration for multiple openstack accounts is required, they can be set up as different configuration profiles: For example:: openstack1: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' openstack2: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.2:5000/v2.0/' With this configuration in place, any of the glance functions can make use of a configuration profile by declaring it explicitly. For example:: salt '*' glance.image_list profile=openstack1 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import re # Import salt libs from salt.exceptions import ( SaltInvocationError ) from salt.version import ( __version__, SaltStackVersion ) from salt.ext import six # is there not SaltStackVersion.current() to get # the version of the salt running this code?? 
_version_ary = __version__.split('.') CUR_VER = SaltStackVersion(_version_ary[0], _version_ary[1]) BORON = SaltStackVersion.from_name('Boron') # pylint: disable=import-error HAS_GLANCE = False try: from glanceclient import client from glanceclient import exc HAS_GLANCE = True except ImportError: pass # Workaround, as the Glance API v2 requires you to # already have a keystone session token HAS_KEYSTONE = False try: from keystoneclient.v2_0 import client as kstone #import keystoneclient.apiclient.exceptions as kstone_exc HAS_KEYSTONE = True except ImportError: pass import logging logging.basicConfig(level=logging.DEBUG) log = logging.getLogger(__name__) import pprint def __virtual__(): ''' Only load this module if glance is installed on this minion. ''' if HAS_GLANCE: return 'glance' return (False, 'The glance execution module cannot be loaded: the glanceclient python library is not available.') __opts__ = {} def _auth(profile=None, api_version=2, **connection_args): ''' Set up glance credentials, returns `glanceclient.client.Client`. Optional parameter "api_version" defaults to 2. Only intended to be used within glance-enabled modules ''' __utils__['versions.warn_until']( 'Neon', ( 'The glance module has been deprecated and will be removed in {version}. ' 'Please update to using the glanceng module' ), ) if profile: prefix = profile + ":keystone." else: prefix = "keystone." def get(key, default=None): ''' Checks connection_args, then salt-minion config, falls back to specified default value. 
''' return connection_args.get('connection_' + key, __salt__['config.get'](prefix + key, default)) user = get('user', 'admin') password = get('password', None) tenant = get('tenant', 'admin') tenant_id = get('tenant_id') auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0') insecure = get('insecure', False) admin_token = get('token') region = get('region') ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/') g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile) # The trailing 'v2' causes URLs like thise one: # http://127.0.0.1:9292/v2/v1/images g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl']) if admin_token and api_version != 1 and not password: # If we had a password we could just # ignore the admin-token and move on... raise SaltInvocationError('Only can use keystone admin token ' + 'with Glance API v1') elif password: # Can't use the admin-token anyway kwargs = {'username': user, 'password': password, 'tenant_id': tenant_id, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url, 'region_name': region, 'tenant_name': tenant} # 'insecure' keyword not supported by all v2.0 keystone clients # this ensures it's only passed in when defined if insecure: kwargs['insecure'] = True elif api_version == 1 and admin_token: kwargs = {'token': admin_token, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url} else: raise SaltInvocationError('No credentials to authenticate with.') if HAS_KEYSTONE: log.debug('Calling keystoneclient.v2_0.client.Client(%s, **%s)', ks_endpoint, kwargs) keystone = kstone.Client(**kwargs) kwargs['token'] = keystone.get_token(keystone.session) # This doesn't realy prevent the password to show up # in the minion log as keystoneclient.session is # logging it anyway when in debug-mode kwargs.pop('password') log.debug('Calling glanceclient.client.Client(%s, %s, **%s)', api_version, g_endpoint_url, kwargs) # may raise exc.HTTPUnauthorized, exc.HTTPNotFound # but we deal with those elsewhere return 
client.Client(api_version, g_endpoint_url, **kwargs) else: raise NotImplementedError( "Can't retrieve a auth_token without keystone") def _add_image(collection, image): ''' Add image to given dictionary ''' image_prep = { 'id': image.id, 'name': image.name, 'created_at': image.created_at, 'file': image.file, 'min_disk': image.min_disk, 'min_ram': image.min_ram, 'owner': image.owner, 'protected': image.protected, 'status': image.status, 'tags': image.tags, 'updated_at': image.updated_at, 'visibility': image.visibility, } # Those cause AttributeErrors in Icehouse' glanceclient for attr in ['container_format', 'disk_format', 'size']: if attr in image: image_prep[attr] = image[attr] if type(collection) is dict: collection[image.name] = image_prep elif type(collection) is list: collection.append(image_prep) else: msg = '"collection" is {0}'.format(type(collection)) +\ 'instead of dict or list.' log.error(msg) raise TypeError(msg) return collection def image_delete(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Delete an image (glance image-delete) CLI Examples: .. 
code-block:: bash salt '*' glance.image_delete c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete name=f16-jeos ''' g_client = _auth(profile) image = {'id': False, 'name': None} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image id ' 'for name {0}'.format(name) } elif not name: name = image['name'] try: g_client.images.delete(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } except exc.HTTPForbidden as forbidden: log.error(six.text_type(forbidden)) return { 'result': False, 'comment': six.text_type(forbidden) } return { 'result': True, 'comment': 'Deleted image \'{0}\' ({1}).'.format(name, id), } def image_show(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Return details about a specific image (glance image-show) CLI Example: .. code-block:: bash salt '*' glance.image_show ''' g_client = _auth(profile) ret = {} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image ID ' 'for name \'{0}\''.format(name) } try: image = g_client.images.get(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } log.debug( 'Properties of image %s:\n%s', image.name, pprint.PrettyPrinter(indent=4).pformat(image) ) schema = image_schema(profile=profile) if len(schema.keys()) == 1: schema = schema['image'] for key in schema: if key in image: ret[key] = image[key] return ret def image_list(id=None, profile=None, name=None): # pylint: disable=C0103 ''' Return a list of available images (glance image-list) CLI Example: .. 
code-block:: bash salt '*' glance.image_list ''' g_client = _auth(profile) ret = [] for image in g_client.images.list(): if id is None and name is None: _add_image(ret, image) else: if id is not None and id == image.id: _add_image(ret, image) return ret if name == image.name: if name in ret and CUR_VER < BORON: # Not really worth an exception return { 'result': False, 'comment': 'More than one image with ' 'name "{0}"'.format(name) } _add_image(ret, image) log.debug('Returning images: %s', ret) return ret def image_schema(profile=None): ''' Returns names and descriptions of the schema "image"'s properties for this profile's instance of glance CLI Example: .. code-block:: bash salt '*' glance.image_schema ''' return schema_get('image', profile) def image_update(id=None, name=None, profile=None, **kwargs): # pylint: disable=C0103 ''' Update properties of given image. Known to work for: - min_ram (in MB) - protected (bool) - visibility ('public' or 'private') CLI Example: .. code-block:: bash salt '*' glance.image_update id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_update name=f16-jeos ''' if id: image = image_show(id=id, profile=profile) if 'result' in image and not image['result']: return image elif len(image) == 1: image = image.values()[0] elif name: img_list = image_list(name=name, profile=profile) if img_list is dict and 'result' in img_list: return img_list elif not img_list: return { 'result': False, 'comment': 'No image with name \'{0}\' ' 'found.'.format(name) } elif len(img_list) == 1: try: image = img_list[0] except KeyError: image = img_list[name] else: raise SaltInvocationError log.debug('Found image:\n%s', image) to_update = {} for key, value in kwargs.items(): if key.startswith('_'): continue if key not in image or image[key] != value: log.debug('add <%s=%s> to to_update', key, value) to_update[key] = value g_client = _auth(profile) updated = g_client.images.update(image['id'], **to_update) return updated def schema_get(name, 
profile=None): ''' Known valid names of schemas are: - image - images - member - members CLI Example: .. code-block:: bash salt '*' glance.schema_get name=f16-jeos ''' g_client = _auth(profile) schema_props = {} for prop in g_client.schemas.get(name).properties: schema_props[prop.name] = prop.description log.debug( 'Properties of schema %s:\n%s', name, pprint.PrettyPrinter(indent=4).pformat(schema_props) ) return {name: schema_props} def _item_list(profile=None): ''' Template for writing list functions Return a list of available items (glance items-list) CLI Example: .. code-block:: bash salt '*' glance.item_list ''' g_client = _auth(profile) ret = [] for item in g_client.items.list(): ret.append(item.__dict__) #ret[item.name] = { # 'name': item.name, # } return ret # The following is a list of functions that need to be incorporated in the # glance module. This list should be updated as functions are added. # image-download Download a specific image. # member-create Share a specific image with a tenant. # member-delete Remove a shared image from a tenant. # member-list Describe sharing permissions by image or tenant.
saltstack/salt
salt/modules/glance.py
image_delete
python
def image_delete(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Delete an image (glance image-delete) CLI Examples: .. code-block:: bash salt '*' glance.image_delete c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete name=f16-jeos ''' g_client = _auth(profile) image = {'id': False, 'name': None} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image id ' 'for name {0}'.format(name) } elif not name: name = image['name'] try: g_client.images.delete(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } except exc.HTTPForbidden as forbidden: log.error(six.text_type(forbidden)) return { 'result': False, 'comment': six.text_type(forbidden) } return { 'result': True, 'comment': 'Deleted image \'{0}\' ({1}).'.format(name, id), }
Delete an image (glance image-delete) CLI Examples: .. code-block:: bash salt '*' glance.image_delete c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete name=f16-jeos
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/glance.py#L285-L329
[ "def _auth(profile=None, api_version=2, **connection_args):\n '''\n Set up glance credentials, returns\n `glanceclient.client.Client`. Optional parameter\n \"api_version\" defaults to 2.\n\n Only intended to be used within glance-enabled modules\n '''\n __utils__['versions.warn_until'](\n 'Neon',\n (\n 'The glance module has been deprecated and will be removed in {version}. '\n 'Please update to using the glanceng module'\n ),\n )\n\n if profile:\n prefix = profile + \":keystone.\"\n else:\n prefix = \"keystone.\"\n\n def get(key, default=None):\n '''\n Checks connection_args, then salt-minion config,\n falls back to specified default value.\n '''\n return connection_args.get('connection_' + key,\n __salt__['config.get'](prefix + key, default))\n\n user = get('user', 'admin')\n password = get('password', None)\n tenant = get('tenant', 'admin')\n tenant_id = get('tenant_id')\n auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0')\n insecure = get('insecure', False)\n admin_token = get('token')\n region = get('region')\n ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/')\n g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile)\n # The trailing 'v2' causes URLs like thise one:\n # http://127.0.0.1:9292/v2/v1/images\n g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl'])\n\n if admin_token and api_version != 1 and not password:\n # If we had a password we could just\n # ignore the admin-token and move on...\n raise SaltInvocationError('Only can use keystone admin token ' +\n 'with Glance API v1')\n elif password:\n # Can't use the admin-token anyway\n kwargs = {'username': user,\n 'password': password,\n 'tenant_id': tenant_id,\n 'auth_url': auth_url,\n 'endpoint_url': g_endpoint_url,\n 'region_name': region,\n 'tenant_name': tenant}\n # 'insecure' keyword not supported by all v2.0 keystone clients\n # this ensures it's only passed in when defined\n if insecure:\n kwargs['insecure'] = True\n elif api_version == 1 and admin_token:\n 
kwargs = {'token': admin_token,\n 'auth_url': auth_url,\n 'endpoint_url': g_endpoint_url}\n else:\n raise SaltInvocationError('No credentials to authenticate with.')\n\n if HAS_KEYSTONE:\n log.debug('Calling keystoneclient.v2_0.client.Client(%s, **%s)',\n ks_endpoint, kwargs)\n keystone = kstone.Client(**kwargs)\n kwargs['token'] = keystone.get_token(keystone.session)\n # This doesn't realy prevent the password to show up\n # in the minion log as keystoneclient.session is\n # logging it anyway when in debug-mode\n kwargs.pop('password')\n log.debug('Calling glanceclient.client.Client(%s, %s, **%s)',\n api_version, g_endpoint_url, kwargs)\n # may raise exc.HTTPUnauthorized, exc.HTTPNotFound\n # but we deal with those elsewhere\n return client.Client(api_version, g_endpoint_url, **kwargs)\n else:\n raise NotImplementedError(\n \"Can't retrieve a auth_token without keystone\")\n" ]
# -*- coding: utf-8 -*- ''' Module for handling openstack glance calls. :optdepends: - glanceclient Python adapter :configuration: This module is not usable until the following are specified either in a pillar or in the minion's config file:: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.insecure: False #(optional) keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' If configuration for multiple openstack accounts is required, they can be set up as different configuration profiles: For example:: openstack1: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' openstack2: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.2:5000/v2.0/' With this configuration in place, any of the glance functions can make use of a configuration profile by declaring it explicitly. For example:: salt '*' glance.image_list profile=openstack1 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import re # Import salt libs from salt.exceptions import ( SaltInvocationError ) from salt.version import ( __version__, SaltStackVersion ) from salt.ext import six # is there not SaltStackVersion.current() to get # the version of the salt running this code?? 
_version_ary = __version__.split('.') CUR_VER = SaltStackVersion(_version_ary[0], _version_ary[1]) BORON = SaltStackVersion.from_name('Boron') # pylint: disable=import-error HAS_GLANCE = False try: from glanceclient import client from glanceclient import exc HAS_GLANCE = True except ImportError: pass # Workaround, as the Glance API v2 requires you to # already have a keystone session token HAS_KEYSTONE = False try: from keystoneclient.v2_0 import client as kstone #import keystoneclient.apiclient.exceptions as kstone_exc HAS_KEYSTONE = True except ImportError: pass import logging logging.basicConfig(level=logging.DEBUG) log = logging.getLogger(__name__) import pprint def __virtual__(): ''' Only load this module if glance is installed on this minion. ''' if HAS_GLANCE: return 'glance' return (False, 'The glance execution module cannot be loaded: the glanceclient python library is not available.') __opts__ = {} def _auth(profile=None, api_version=2, **connection_args): ''' Set up glance credentials, returns `glanceclient.client.Client`. Optional parameter "api_version" defaults to 2. Only intended to be used within glance-enabled modules ''' __utils__['versions.warn_until']( 'Neon', ( 'The glance module has been deprecated and will be removed in {version}. ' 'Please update to using the glanceng module' ), ) if profile: prefix = profile + ":keystone." else: prefix = "keystone." def get(key, default=None): ''' Checks connection_args, then salt-minion config, falls back to specified default value. 
''' return connection_args.get('connection_' + key, __salt__['config.get'](prefix + key, default)) user = get('user', 'admin') password = get('password', None) tenant = get('tenant', 'admin') tenant_id = get('tenant_id') auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0') insecure = get('insecure', False) admin_token = get('token') region = get('region') ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/') g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile) # The trailing 'v2' causes URLs like thise one: # http://127.0.0.1:9292/v2/v1/images g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl']) if admin_token and api_version != 1 and not password: # If we had a password we could just # ignore the admin-token and move on... raise SaltInvocationError('Only can use keystone admin token ' + 'with Glance API v1') elif password: # Can't use the admin-token anyway kwargs = {'username': user, 'password': password, 'tenant_id': tenant_id, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url, 'region_name': region, 'tenant_name': tenant} # 'insecure' keyword not supported by all v2.0 keystone clients # this ensures it's only passed in when defined if insecure: kwargs['insecure'] = True elif api_version == 1 and admin_token: kwargs = {'token': admin_token, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url} else: raise SaltInvocationError('No credentials to authenticate with.') if HAS_KEYSTONE: log.debug('Calling keystoneclient.v2_0.client.Client(%s, **%s)', ks_endpoint, kwargs) keystone = kstone.Client(**kwargs) kwargs['token'] = keystone.get_token(keystone.session) # This doesn't realy prevent the password to show up # in the minion log as keystoneclient.session is # logging it anyway when in debug-mode kwargs.pop('password') log.debug('Calling glanceclient.client.Client(%s, %s, **%s)', api_version, g_endpoint_url, kwargs) # may raise exc.HTTPUnauthorized, exc.HTTPNotFound # but we deal with those elsewhere return 
client.Client(api_version, g_endpoint_url, **kwargs) else: raise NotImplementedError( "Can't retrieve a auth_token without keystone") def _add_image(collection, image): ''' Add image to given dictionary ''' image_prep = { 'id': image.id, 'name': image.name, 'created_at': image.created_at, 'file': image.file, 'min_disk': image.min_disk, 'min_ram': image.min_ram, 'owner': image.owner, 'protected': image.protected, 'status': image.status, 'tags': image.tags, 'updated_at': image.updated_at, 'visibility': image.visibility, } # Those cause AttributeErrors in Icehouse' glanceclient for attr in ['container_format', 'disk_format', 'size']: if attr in image: image_prep[attr] = image[attr] if type(collection) is dict: collection[image.name] = image_prep elif type(collection) is list: collection.append(image_prep) else: msg = '"collection" is {0}'.format(type(collection)) +\ 'instead of dict or list.' log.error(msg) raise TypeError(msg) return collection def image_create(name, location=None, profile=None, visibility=None, container_format='bare', disk_format='raw', protected=None,): ''' Create an image (glance image-create) CLI Example, old format: .. code-block:: bash salt '*' glance.image_create name=f16-jeos \\ disk_format=qcow2 container_format=ovf CLI Example, new format resembling Glance API v2: .. code-block:: bash salt '*' glance.image_create name=f16-jeos visibility=public \\ disk_format=qcow2 container_format=ovf The parameter 'visibility' defaults to 'public' if not specified. 
''' kwargs = {} # valid options for "visibility": v_list = ['public', 'private'] # valid options for "container_format": cf_list = ['ami', 'ari', 'aki', 'bare', 'ovf'] # valid options for "disk_format": df_list = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'] kwargs['copy_from'] = location if visibility is not None: if visibility not in v_list: raise SaltInvocationError('"visibility" needs to be one ' + 'of the following: {0}'.format(', '.join(v_list))) elif visibility == 'public': kwargs['is_public'] = True else: kwargs['is_public'] = False else: kwargs['is_public'] = True if container_format not in cf_list: raise SaltInvocationError('"container_format" needs to be ' + 'one of the following: {0}'.format(', '.join(cf_list))) else: kwargs['container_format'] = container_format if disk_format not in df_list: raise SaltInvocationError('"disk_format" needs to be one ' + 'of the following: {0}'.format(', '.join(df_list))) else: kwargs['disk_format'] = disk_format if protected is not None: kwargs['protected'] = protected # Icehouse's glanceclient doesn't have add_location() and # glanceclient.v2 doesn't implement Client.images.create() # in a usable fashion. Thus we have to use v1 for now. g_client = _auth(profile, api_version=1) image = g_client.images.create(name=name, **kwargs) return image_show(image.id, profile=profile) def image_show(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Return details about a specific image (glance image-show) CLI Example: .. 
code-block:: bash salt '*' glance.image_show ''' g_client = _auth(profile) ret = {} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image ID ' 'for name \'{0}\''.format(name) } try: image = g_client.images.get(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } log.debug( 'Properties of image %s:\n%s', image.name, pprint.PrettyPrinter(indent=4).pformat(image) ) schema = image_schema(profile=profile) if len(schema.keys()) == 1: schema = schema['image'] for key in schema: if key in image: ret[key] = image[key] return ret def image_list(id=None, profile=None, name=None): # pylint: disable=C0103 ''' Return a list of available images (glance image-list) CLI Example: .. code-block:: bash salt '*' glance.image_list ''' g_client = _auth(profile) ret = [] for image in g_client.images.list(): if id is None and name is None: _add_image(ret, image) else: if id is not None and id == image.id: _add_image(ret, image) return ret if name == image.name: if name in ret and CUR_VER < BORON: # Not really worth an exception return { 'result': False, 'comment': 'More than one image with ' 'name "{0}"'.format(name) } _add_image(ret, image) log.debug('Returning images: %s', ret) return ret def image_schema(profile=None): ''' Returns names and descriptions of the schema "image"'s properties for this profile's instance of glance CLI Example: .. code-block:: bash salt '*' glance.image_schema ''' return schema_get('image', profile) def image_update(id=None, name=None, profile=None, **kwargs): # pylint: disable=C0103 ''' Update properties of given image. Known to work for: - min_ram (in MB) - protected (bool) - visibility ('public' or 'private') CLI Example: .. 
code-block:: bash salt '*' glance.image_update id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_update name=f16-jeos ''' if id: image = image_show(id=id, profile=profile) if 'result' in image and not image['result']: return image elif len(image) == 1: image = image.values()[0] elif name: img_list = image_list(name=name, profile=profile) if img_list is dict and 'result' in img_list: return img_list elif not img_list: return { 'result': False, 'comment': 'No image with name \'{0}\' ' 'found.'.format(name) } elif len(img_list) == 1: try: image = img_list[0] except KeyError: image = img_list[name] else: raise SaltInvocationError log.debug('Found image:\n%s', image) to_update = {} for key, value in kwargs.items(): if key.startswith('_'): continue if key not in image or image[key] != value: log.debug('add <%s=%s> to to_update', key, value) to_update[key] = value g_client = _auth(profile) updated = g_client.images.update(image['id'], **to_update) return updated def schema_get(name, profile=None): ''' Known valid names of schemas are: - image - images - member - members CLI Example: .. code-block:: bash salt '*' glance.schema_get name=f16-jeos ''' g_client = _auth(profile) schema_props = {} for prop in g_client.schemas.get(name).properties: schema_props[prop.name] = prop.description log.debug( 'Properties of schema %s:\n%s', name, pprint.PrettyPrinter(indent=4).pformat(schema_props) ) return {name: schema_props} def _item_list(profile=None): ''' Template for writing list functions Return a list of available items (glance items-list) CLI Example: .. code-block:: bash salt '*' glance.item_list ''' g_client = _auth(profile) ret = [] for item in g_client.items.list(): ret.append(item.__dict__) #ret[item.name] = { # 'name': item.name, # } return ret # The following is a list of functions that need to be incorporated in the # glance module. This list should be updated as functions are added. # image-download Download a specific image. 
# member-create Share a specific image with a tenant. # member-delete Remove a shared image from a tenant. # member-list Describe sharing permissions by image or tenant.
saltstack/salt
salt/modules/glance.py
image_show
python
def image_show(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Return details about a specific image (glance image-show) CLI Example: .. code-block:: bash salt '*' glance.image_show ''' g_client = _auth(profile) ret = {} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image ID ' 'for name \'{0}\''.format(name) } try: image = g_client.images.get(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } log.debug( 'Properties of image %s:\n%s', image.name, pprint.PrettyPrinter(indent=4).pformat(image) ) schema = image_schema(profile=profile) if len(schema.keys()) == 1: schema = schema['image'] for key in schema: if key in image: ret[key] = image[key] return ret
Return details about a specific image (glance image-show) CLI Example: .. code-block:: bash salt '*' glance.image_show
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/glance.py#L332-L374
[ "def _auth(profile=None, api_version=2, **connection_args):\n '''\n Set up glance credentials, returns\n `glanceclient.client.Client`. Optional parameter\n \"api_version\" defaults to 2.\n\n Only intended to be used within glance-enabled modules\n '''\n __utils__['versions.warn_until'](\n 'Neon',\n (\n 'The glance module has been deprecated and will be removed in {version}. '\n 'Please update to using the glanceng module'\n ),\n )\n\n if profile:\n prefix = profile + \":keystone.\"\n else:\n prefix = \"keystone.\"\n\n def get(key, default=None):\n '''\n Checks connection_args, then salt-minion config,\n falls back to specified default value.\n '''\n return connection_args.get('connection_' + key,\n __salt__['config.get'](prefix + key, default))\n\n user = get('user', 'admin')\n password = get('password', None)\n tenant = get('tenant', 'admin')\n tenant_id = get('tenant_id')\n auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0')\n insecure = get('insecure', False)\n admin_token = get('token')\n region = get('region')\n ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/')\n g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile)\n # The trailing 'v2' causes URLs like thise one:\n # http://127.0.0.1:9292/v2/v1/images\n g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl'])\n\n if admin_token and api_version != 1 and not password:\n # If we had a password we could just\n # ignore the admin-token and move on...\n raise SaltInvocationError('Only can use keystone admin token ' +\n 'with Glance API v1')\n elif password:\n # Can't use the admin-token anyway\n kwargs = {'username': user,\n 'password': password,\n 'tenant_id': tenant_id,\n 'auth_url': auth_url,\n 'endpoint_url': g_endpoint_url,\n 'region_name': region,\n 'tenant_name': tenant}\n # 'insecure' keyword not supported by all v2.0 keystone clients\n # this ensures it's only passed in when defined\n if insecure:\n kwargs['insecure'] = True\n elif api_version == 1 and admin_token:\n 
kwargs = {'token': admin_token,\n 'auth_url': auth_url,\n 'endpoint_url': g_endpoint_url}\n else:\n raise SaltInvocationError('No credentials to authenticate with.')\n\n if HAS_KEYSTONE:\n log.debug('Calling keystoneclient.v2_0.client.Client(%s, **%s)',\n ks_endpoint, kwargs)\n keystone = kstone.Client(**kwargs)\n kwargs['token'] = keystone.get_token(keystone.session)\n # This doesn't realy prevent the password to show up\n # in the minion log as keystoneclient.session is\n # logging it anyway when in debug-mode\n kwargs.pop('password')\n log.debug('Calling glanceclient.client.Client(%s, %s, **%s)',\n api_version, g_endpoint_url, kwargs)\n # may raise exc.HTTPUnauthorized, exc.HTTPNotFound\n # but we deal with those elsewhere\n return client.Client(api_version, g_endpoint_url, **kwargs)\n else:\n raise NotImplementedError(\n \"Can't retrieve a auth_token without keystone\")\n", "def image_schema(profile=None):\n '''\n Returns names and descriptions of the schema \"image\"'s\n properties for this profile's instance of glance\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' glance.image_schema\n '''\n return schema_get('image', profile)\n" ]
# -*- coding: utf-8 -*- ''' Module for handling openstack glance calls. :optdepends: - glanceclient Python adapter :configuration: This module is not usable until the following are specified either in a pillar or in the minion's config file:: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.insecure: False #(optional) keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' If configuration for multiple openstack accounts is required, they can be set up as different configuration profiles: For example:: openstack1: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' openstack2: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.2:5000/v2.0/' With this configuration in place, any of the glance functions can make use of a configuration profile by declaring it explicitly. For example:: salt '*' glance.image_list profile=openstack1 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import re # Import salt libs from salt.exceptions import ( SaltInvocationError ) from salt.version import ( __version__, SaltStackVersion ) from salt.ext import six # is there not SaltStackVersion.current() to get # the version of the salt running this code?? 
_version_ary = __version__.split('.') CUR_VER = SaltStackVersion(_version_ary[0], _version_ary[1]) BORON = SaltStackVersion.from_name('Boron') # pylint: disable=import-error HAS_GLANCE = False try: from glanceclient import client from glanceclient import exc HAS_GLANCE = True except ImportError: pass # Workaround, as the Glance API v2 requires you to # already have a keystone session token HAS_KEYSTONE = False try: from keystoneclient.v2_0 import client as kstone #import keystoneclient.apiclient.exceptions as kstone_exc HAS_KEYSTONE = True except ImportError: pass import logging logging.basicConfig(level=logging.DEBUG) log = logging.getLogger(__name__) import pprint def __virtual__(): ''' Only load this module if glance is installed on this minion. ''' if HAS_GLANCE: return 'glance' return (False, 'The glance execution module cannot be loaded: the glanceclient python library is not available.') __opts__ = {} def _auth(profile=None, api_version=2, **connection_args): ''' Set up glance credentials, returns `glanceclient.client.Client`. Optional parameter "api_version" defaults to 2. Only intended to be used within glance-enabled modules ''' __utils__['versions.warn_until']( 'Neon', ( 'The glance module has been deprecated and will be removed in {version}. ' 'Please update to using the glanceng module' ), ) if profile: prefix = profile + ":keystone." else: prefix = "keystone." def get(key, default=None): ''' Checks connection_args, then salt-minion config, falls back to specified default value. 
''' return connection_args.get('connection_' + key, __salt__['config.get'](prefix + key, default)) user = get('user', 'admin') password = get('password', None) tenant = get('tenant', 'admin') tenant_id = get('tenant_id') auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0') insecure = get('insecure', False) admin_token = get('token') region = get('region') ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/') g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile) # The trailing 'v2' causes URLs like thise one: # http://127.0.0.1:9292/v2/v1/images g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl']) if admin_token and api_version != 1 and not password: # If we had a password we could just # ignore the admin-token and move on... raise SaltInvocationError('Only can use keystone admin token ' + 'with Glance API v1') elif password: # Can't use the admin-token anyway kwargs = {'username': user, 'password': password, 'tenant_id': tenant_id, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url, 'region_name': region, 'tenant_name': tenant} # 'insecure' keyword not supported by all v2.0 keystone clients # this ensures it's only passed in when defined if insecure: kwargs['insecure'] = True elif api_version == 1 and admin_token: kwargs = {'token': admin_token, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url} else: raise SaltInvocationError('No credentials to authenticate with.') if HAS_KEYSTONE: log.debug('Calling keystoneclient.v2_0.client.Client(%s, **%s)', ks_endpoint, kwargs) keystone = kstone.Client(**kwargs) kwargs['token'] = keystone.get_token(keystone.session) # This doesn't realy prevent the password to show up # in the minion log as keystoneclient.session is # logging it anyway when in debug-mode kwargs.pop('password') log.debug('Calling glanceclient.client.Client(%s, %s, **%s)', api_version, g_endpoint_url, kwargs) # may raise exc.HTTPUnauthorized, exc.HTTPNotFound # but we deal with those elsewhere return 
client.Client(api_version, g_endpoint_url, **kwargs) else: raise NotImplementedError( "Can't retrieve a auth_token without keystone") def _add_image(collection, image): ''' Add image to given dictionary ''' image_prep = { 'id': image.id, 'name': image.name, 'created_at': image.created_at, 'file': image.file, 'min_disk': image.min_disk, 'min_ram': image.min_ram, 'owner': image.owner, 'protected': image.protected, 'status': image.status, 'tags': image.tags, 'updated_at': image.updated_at, 'visibility': image.visibility, } # Those cause AttributeErrors in Icehouse' glanceclient for attr in ['container_format', 'disk_format', 'size']: if attr in image: image_prep[attr] = image[attr] if type(collection) is dict: collection[image.name] = image_prep elif type(collection) is list: collection.append(image_prep) else: msg = '"collection" is {0}'.format(type(collection)) +\ 'instead of dict or list.' log.error(msg) raise TypeError(msg) return collection def image_create(name, location=None, profile=None, visibility=None, container_format='bare', disk_format='raw', protected=None,): ''' Create an image (glance image-create) CLI Example, old format: .. code-block:: bash salt '*' glance.image_create name=f16-jeos \\ disk_format=qcow2 container_format=ovf CLI Example, new format resembling Glance API v2: .. code-block:: bash salt '*' glance.image_create name=f16-jeos visibility=public \\ disk_format=qcow2 container_format=ovf The parameter 'visibility' defaults to 'public' if not specified. 
''' kwargs = {} # valid options for "visibility": v_list = ['public', 'private'] # valid options for "container_format": cf_list = ['ami', 'ari', 'aki', 'bare', 'ovf'] # valid options for "disk_format": df_list = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'] kwargs['copy_from'] = location if visibility is not None: if visibility not in v_list: raise SaltInvocationError('"visibility" needs to be one ' + 'of the following: {0}'.format(', '.join(v_list))) elif visibility == 'public': kwargs['is_public'] = True else: kwargs['is_public'] = False else: kwargs['is_public'] = True if container_format not in cf_list: raise SaltInvocationError('"container_format" needs to be ' + 'one of the following: {0}'.format(', '.join(cf_list))) else: kwargs['container_format'] = container_format if disk_format not in df_list: raise SaltInvocationError('"disk_format" needs to be one ' + 'of the following: {0}'.format(', '.join(df_list))) else: kwargs['disk_format'] = disk_format if protected is not None: kwargs['protected'] = protected # Icehouse's glanceclient doesn't have add_location() and # glanceclient.v2 doesn't implement Client.images.create() # in a usable fashion. Thus we have to use v1 for now. g_client = _auth(profile, api_version=1) image = g_client.images.create(name=name, **kwargs) return image_show(image.id, profile=profile) def image_delete(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Delete an image (glance image-delete) CLI Examples: .. 
code-block:: bash salt '*' glance.image_delete c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete name=f16-jeos ''' g_client = _auth(profile) image = {'id': False, 'name': None} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image id ' 'for name {0}'.format(name) } elif not name: name = image['name'] try: g_client.images.delete(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } except exc.HTTPForbidden as forbidden: log.error(six.text_type(forbidden)) return { 'result': False, 'comment': six.text_type(forbidden) } return { 'result': True, 'comment': 'Deleted image \'{0}\' ({1}).'.format(name, id), } def image_list(id=None, profile=None, name=None): # pylint: disable=C0103 ''' Return a list of available images (glance image-list) CLI Example: .. code-block:: bash salt '*' glance.image_list ''' g_client = _auth(profile) ret = [] for image in g_client.images.list(): if id is None and name is None: _add_image(ret, image) else: if id is not None and id == image.id: _add_image(ret, image) return ret if name == image.name: if name in ret and CUR_VER < BORON: # Not really worth an exception return { 'result': False, 'comment': 'More than one image with ' 'name "{0}"'.format(name) } _add_image(ret, image) log.debug('Returning images: %s', ret) return ret def image_schema(profile=None): ''' Returns names and descriptions of the schema "image"'s properties for this profile's instance of glance CLI Example: .. code-block:: bash salt '*' glance.image_schema ''' return schema_get('image', profile) def image_update(id=None, name=None, profile=None, **kwargs): # pylint: disable=C0103 ''' Update properties of given image. 
Known to work for: - min_ram (in MB) - protected (bool) - visibility ('public' or 'private') CLI Example: .. code-block:: bash salt '*' glance.image_update id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_update name=f16-jeos ''' if id: image = image_show(id=id, profile=profile) if 'result' in image and not image['result']: return image elif len(image) == 1: image = image.values()[0] elif name: img_list = image_list(name=name, profile=profile) if img_list is dict and 'result' in img_list: return img_list elif not img_list: return { 'result': False, 'comment': 'No image with name \'{0}\' ' 'found.'.format(name) } elif len(img_list) == 1: try: image = img_list[0] except KeyError: image = img_list[name] else: raise SaltInvocationError log.debug('Found image:\n%s', image) to_update = {} for key, value in kwargs.items(): if key.startswith('_'): continue if key not in image or image[key] != value: log.debug('add <%s=%s> to to_update', key, value) to_update[key] = value g_client = _auth(profile) updated = g_client.images.update(image['id'], **to_update) return updated def schema_get(name, profile=None): ''' Known valid names of schemas are: - image - images - member - members CLI Example: .. code-block:: bash salt '*' glance.schema_get name=f16-jeos ''' g_client = _auth(profile) schema_props = {} for prop in g_client.schemas.get(name).properties: schema_props[prop.name] = prop.description log.debug( 'Properties of schema %s:\n%s', name, pprint.PrettyPrinter(indent=4).pformat(schema_props) ) return {name: schema_props} def _item_list(profile=None): ''' Template for writing list functions Return a list of available items (glance items-list) CLI Example: .. code-block:: bash salt '*' glance.item_list ''' g_client = _auth(profile) ret = [] for item in g_client.items.list(): ret.append(item.__dict__) #ret[item.name] = { # 'name': item.name, # } return ret # The following is a list of functions that need to be incorporated in the # glance module. 
This list should be updated as functions are added. # image-download Download a specific image. # member-create Share a specific image with a tenant. # member-delete Remove a shared image from a tenant. # member-list Describe sharing permissions by image or tenant.
saltstack/salt
salt/modules/glance.py
image_list
python
def image_list(id=None, profile=None, name=None): # pylint: disable=C0103 ''' Return a list of available images (glance image-list) CLI Example: .. code-block:: bash salt '*' glance.image_list ''' g_client = _auth(profile) ret = [] for image in g_client.images.list(): if id is None and name is None: _add_image(ret, image) else: if id is not None and id == image.id: _add_image(ret, image) return ret if name == image.name: if name in ret and CUR_VER < BORON: # Not really worth an exception return { 'result': False, 'comment': 'More than one image with ' 'name "{0}"'.format(name) } _add_image(ret, image) log.debug('Returning images: %s', ret) return ret
Return a list of available images (glance image-list) CLI Example: .. code-block:: bash salt '*' glance.image_list
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/glance.py#L377-L407
[ "def _auth(profile=None, api_version=2, **connection_args):\n '''\n Set up glance credentials, returns\n `glanceclient.client.Client`. Optional parameter\n \"api_version\" defaults to 2.\n\n Only intended to be used within glance-enabled modules\n '''\n __utils__['versions.warn_until'](\n 'Neon',\n (\n 'The glance module has been deprecated and will be removed in {version}. '\n 'Please update to using the glanceng module'\n ),\n )\n\n if profile:\n prefix = profile + \":keystone.\"\n else:\n prefix = \"keystone.\"\n\n def get(key, default=None):\n '''\n Checks connection_args, then salt-minion config,\n falls back to specified default value.\n '''\n return connection_args.get('connection_' + key,\n __salt__['config.get'](prefix + key, default))\n\n user = get('user', 'admin')\n password = get('password', None)\n tenant = get('tenant', 'admin')\n tenant_id = get('tenant_id')\n auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0')\n insecure = get('insecure', False)\n admin_token = get('token')\n region = get('region')\n ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/')\n g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile)\n # The trailing 'v2' causes URLs like thise one:\n # http://127.0.0.1:9292/v2/v1/images\n g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl'])\n\n if admin_token and api_version != 1 and not password:\n # If we had a password we could just\n # ignore the admin-token and move on...\n raise SaltInvocationError('Only can use keystone admin token ' +\n 'with Glance API v1')\n elif password:\n # Can't use the admin-token anyway\n kwargs = {'username': user,\n 'password': password,\n 'tenant_id': tenant_id,\n 'auth_url': auth_url,\n 'endpoint_url': g_endpoint_url,\n 'region_name': region,\n 'tenant_name': tenant}\n # 'insecure' keyword not supported by all v2.0 keystone clients\n # this ensures it's only passed in when defined\n if insecure:\n kwargs['insecure'] = True\n elif api_version == 1 and admin_token:\n 
kwargs = {'token': admin_token,\n 'auth_url': auth_url,\n 'endpoint_url': g_endpoint_url}\n else:\n raise SaltInvocationError('No credentials to authenticate with.')\n\n if HAS_KEYSTONE:\n log.debug('Calling keystoneclient.v2_0.client.Client(%s, **%s)',\n ks_endpoint, kwargs)\n keystone = kstone.Client(**kwargs)\n kwargs['token'] = keystone.get_token(keystone.session)\n # This doesn't realy prevent the password to show up\n # in the minion log as keystoneclient.session is\n # logging it anyway when in debug-mode\n kwargs.pop('password')\n log.debug('Calling glanceclient.client.Client(%s, %s, **%s)',\n api_version, g_endpoint_url, kwargs)\n # may raise exc.HTTPUnauthorized, exc.HTTPNotFound\n # but we deal with those elsewhere\n return client.Client(api_version, g_endpoint_url, **kwargs)\n else:\n raise NotImplementedError(\n \"Can't retrieve a auth_token without keystone\")\n", "def _add_image(collection, image):\n '''\n Add image to given dictionary\n '''\n image_prep = {\n 'id': image.id,\n 'name': image.name,\n 'created_at': image.created_at,\n 'file': image.file,\n 'min_disk': image.min_disk,\n 'min_ram': image.min_ram,\n 'owner': image.owner,\n 'protected': image.protected,\n 'status': image.status,\n 'tags': image.tags,\n 'updated_at': image.updated_at,\n 'visibility': image.visibility,\n }\n # Those cause AttributeErrors in Icehouse' glanceclient\n for attr in ['container_format', 'disk_format', 'size']:\n if attr in image:\n image_prep[attr] = image[attr]\n if type(collection) is dict:\n collection[image.name] = image_prep\n elif type(collection) is list:\n collection.append(image_prep)\n else:\n msg = '\"collection\" is {0}'.format(type(collection)) +\\\n 'instead of dict or list.'\n log.error(msg)\n raise TypeError(msg)\n return collection\n" ]
# -*- coding: utf-8 -*- ''' Module for handling openstack glance calls. :optdepends: - glanceclient Python adapter :configuration: This module is not usable until the following are specified either in a pillar or in the minion's config file:: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.insecure: False #(optional) keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' If configuration for multiple openstack accounts is required, they can be set up as different configuration profiles: For example:: openstack1: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' openstack2: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.2:5000/v2.0/' With this configuration in place, any of the glance functions can make use of a configuration profile by declaring it explicitly. For example:: salt '*' glance.image_list profile=openstack1 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import re # Import salt libs from salt.exceptions import ( SaltInvocationError ) from salt.version import ( __version__, SaltStackVersion ) from salt.ext import six # is there not SaltStackVersion.current() to get # the version of the salt running this code?? 
_version_ary = __version__.split('.') CUR_VER = SaltStackVersion(_version_ary[0], _version_ary[1]) BORON = SaltStackVersion.from_name('Boron') # pylint: disable=import-error HAS_GLANCE = False try: from glanceclient import client from glanceclient import exc HAS_GLANCE = True except ImportError: pass # Workaround, as the Glance API v2 requires you to # already have a keystone session token HAS_KEYSTONE = False try: from keystoneclient.v2_0 import client as kstone #import keystoneclient.apiclient.exceptions as kstone_exc HAS_KEYSTONE = True except ImportError: pass import logging logging.basicConfig(level=logging.DEBUG) log = logging.getLogger(__name__) import pprint def __virtual__(): ''' Only load this module if glance is installed on this minion. ''' if HAS_GLANCE: return 'glance' return (False, 'The glance execution module cannot be loaded: the glanceclient python library is not available.') __opts__ = {} def _auth(profile=None, api_version=2, **connection_args): ''' Set up glance credentials, returns `glanceclient.client.Client`. Optional parameter "api_version" defaults to 2. Only intended to be used within glance-enabled modules ''' __utils__['versions.warn_until']( 'Neon', ( 'The glance module has been deprecated and will be removed in {version}. ' 'Please update to using the glanceng module' ), ) if profile: prefix = profile + ":keystone." else: prefix = "keystone." def get(key, default=None): ''' Checks connection_args, then salt-minion config, falls back to specified default value. 
''' return connection_args.get('connection_' + key, __salt__['config.get'](prefix + key, default)) user = get('user', 'admin') password = get('password', None) tenant = get('tenant', 'admin') tenant_id = get('tenant_id') auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0') insecure = get('insecure', False) admin_token = get('token') region = get('region') ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/') g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile) # The trailing 'v2' causes URLs like thise one: # http://127.0.0.1:9292/v2/v1/images g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl']) if admin_token and api_version != 1 and not password: # If we had a password we could just # ignore the admin-token and move on... raise SaltInvocationError('Only can use keystone admin token ' + 'with Glance API v1') elif password: # Can't use the admin-token anyway kwargs = {'username': user, 'password': password, 'tenant_id': tenant_id, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url, 'region_name': region, 'tenant_name': tenant} # 'insecure' keyword not supported by all v2.0 keystone clients # this ensures it's only passed in when defined if insecure: kwargs['insecure'] = True elif api_version == 1 and admin_token: kwargs = {'token': admin_token, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url} else: raise SaltInvocationError('No credentials to authenticate with.') if HAS_KEYSTONE: log.debug('Calling keystoneclient.v2_0.client.Client(%s, **%s)', ks_endpoint, kwargs) keystone = kstone.Client(**kwargs) kwargs['token'] = keystone.get_token(keystone.session) # This doesn't realy prevent the password to show up # in the minion log as keystoneclient.session is # logging it anyway when in debug-mode kwargs.pop('password') log.debug('Calling glanceclient.client.Client(%s, %s, **%s)', api_version, g_endpoint_url, kwargs) # may raise exc.HTTPUnauthorized, exc.HTTPNotFound # but we deal with those elsewhere return 
client.Client(api_version, g_endpoint_url, **kwargs) else: raise NotImplementedError( "Can't retrieve a auth_token without keystone") def _add_image(collection, image): ''' Add image to given dictionary ''' image_prep = { 'id': image.id, 'name': image.name, 'created_at': image.created_at, 'file': image.file, 'min_disk': image.min_disk, 'min_ram': image.min_ram, 'owner': image.owner, 'protected': image.protected, 'status': image.status, 'tags': image.tags, 'updated_at': image.updated_at, 'visibility': image.visibility, } # Those cause AttributeErrors in Icehouse' glanceclient for attr in ['container_format', 'disk_format', 'size']: if attr in image: image_prep[attr] = image[attr] if type(collection) is dict: collection[image.name] = image_prep elif type(collection) is list: collection.append(image_prep) else: msg = '"collection" is {0}'.format(type(collection)) +\ 'instead of dict or list.' log.error(msg) raise TypeError(msg) return collection def image_create(name, location=None, profile=None, visibility=None, container_format='bare', disk_format='raw', protected=None,): ''' Create an image (glance image-create) CLI Example, old format: .. code-block:: bash salt '*' glance.image_create name=f16-jeos \\ disk_format=qcow2 container_format=ovf CLI Example, new format resembling Glance API v2: .. code-block:: bash salt '*' glance.image_create name=f16-jeos visibility=public \\ disk_format=qcow2 container_format=ovf The parameter 'visibility' defaults to 'public' if not specified. 
''' kwargs = {} # valid options for "visibility": v_list = ['public', 'private'] # valid options for "container_format": cf_list = ['ami', 'ari', 'aki', 'bare', 'ovf'] # valid options for "disk_format": df_list = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'] kwargs['copy_from'] = location if visibility is not None: if visibility not in v_list: raise SaltInvocationError('"visibility" needs to be one ' + 'of the following: {0}'.format(', '.join(v_list))) elif visibility == 'public': kwargs['is_public'] = True else: kwargs['is_public'] = False else: kwargs['is_public'] = True if container_format not in cf_list: raise SaltInvocationError('"container_format" needs to be ' + 'one of the following: {0}'.format(', '.join(cf_list))) else: kwargs['container_format'] = container_format if disk_format not in df_list: raise SaltInvocationError('"disk_format" needs to be one ' + 'of the following: {0}'.format(', '.join(df_list))) else: kwargs['disk_format'] = disk_format if protected is not None: kwargs['protected'] = protected # Icehouse's glanceclient doesn't have add_location() and # glanceclient.v2 doesn't implement Client.images.create() # in a usable fashion. Thus we have to use v1 for now. g_client = _auth(profile, api_version=1) image = g_client.images.create(name=name, **kwargs) return image_show(image.id, profile=profile) def image_delete(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Delete an image (glance image-delete) CLI Examples: .. 
code-block:: bash salt '*' glance.image_delete c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete name=f16-jeos ''' g_client = _auth(profile) image = {'id': False, 'name': None} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image id ' 'for name {0}'.format(name) } elif not name: name = image['name'] try: g_client.images.delete(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } except exc.HTTPForbidden as forbidden: log.error(six.text_type(forbidden)) return { 'result': False, 'comment': six.text_type(forbidden) } return { 'result': True, 'comment': 'Deleted image \'{0}\' ({1}).'.format(name, id), } def image_show(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Return details about a specific image (glance image-show) CLI Example: .. code-block:: bash salt '*' glance.image_show ''' g_client = _auth(profile) ret = {} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image ID ' 'for name \'{0}\''.format(name) } try: image = g_client.images.get(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } log.debug( 'Properties of image %s:\n%s', image.name, pprint.PrettyPrinter(indent=4).pformat(image) ) schema = image_schema(profile=profile) if len(schema.keys()) == 1: schema = schema['image'] for key in schema: if key in image: ret[key] = image[key] return ret def image_schema(profile=None): ''' Returns names and descriptions of the schema "image"'s properties for this profile's instance of glance CLI Example: .. 
code-block:: bash salt '*' glance.image_schema ''' return schema_get('image', profile) def image_update(id=None, name=None, profile=None, **kwargs): # pylint: disable=C0103 ''' Update properties of given image. Known to work for: - min_ram (in MB) - protected (bool) - visibility ('public' or 'private') CLI Example: .. code-block:: bash salt '*' glance.image_update id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_update name=f16-jeos ''' if id: image = image_show(id=id, profile=profile) if 'result' in image and not image['result']: return image elif len(image) == 1: image = image.values()[0] elif name: img_list = image_list(name=name, profile=profile) if img_list is dict and 'result' in img_list: return img_list elif not img_list: return { 'result': False, 'comment': 'No image with name \'{0}\' ' 'found.'.format(name) } elif len(img_list) == 1: try: image = img_list[0] except KeyError: image = img_list[name] else: raise SaltInvocationError log.debug('Found image:\n%s', image) to_update = {} for key, value in kwargs.items(): if key.startswith('_'): continue if key not in image or image[key] != value: log.debug('add <%s=%s> to to_update', key, value) to_update[key] = value g_client = _auth(profile) updated = g_client.images.update(image['id'], **to_update) return updated def schema_get(name, profile=None): ''' Known valid names of schemas are: - image - images - member - members CLI Example: .. code-block:: bash salt '*' glance.schema_get name=f16-jeos ''' g_client = _auth(profile) schema_props = {} for prop in g_client.schemas.get(name).properties: schema_props[prop.name] = prop.description log.debug( 'Properties of schema %s:\n%s', name, pprint.PrettyPrinter(indent=4).pformat(schema_props) ) return {name: schema_props} def _item_list(profile=None): ''' Template for writing list functions Return a list of available items (glance items-list) CLI Example: .. 
code-block:: bash salt '*' glance.item_list ''' g_client = _auth(profile) ret = [] for item in g_client.items.list(): ret.append(item.__dict__) #ret[item.name] = { # 'name': item.name, # } return ret # The following is a list of functions that need to be incorporated in the # glance module. This list should be updated as functions are added. # image-download Download a specific image. # member-create Share a specific image with a tenant. # member-delete Remove a shared image from a tenant. # member-list Describe sharing permissions by image or tenant.
saltstack/salt
salt/modules/glance.py
image_update
python
def image_update(id=None, name=None, profile=None, **kwargs): # pylint: disable=C0103 ''' Update properties of given image. Known to work for: - min_ram (in MB) - protected (bool) - visibility ('public' or 'private') CLI Example: .. code-block:: bash salt '*' glance.image_update id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_update name=f16-jeos ''' if id: image = image_show(id=id, profile=profile) if 'result' in image and not image['result']: return image elif len(image) == 1: image = image.values()[0] elif name: img_list = image_list(name=name, profile=profile) if img_list is dict and 'result' in img_list: return img_list elif not img_list: return { 'result': False, 'comment': 'No image with name \'{0}\' ' 'found.'.format(name) } elif len(img_list) == 1: try: image = img_list[0] except KeyError: image = img_list[name] else: raise SaltInvocationError log.debug('Found image:\n%s', image) to_update = {} for key, value in kwargs.items(): if key.startswith('_'): continue if key not in image or image[key] != value: log.debug('add <%s=%s> to to_update', key, value) to_update[key] = value g_client = _auth(profile) updated = g_client.images.update(image['id'], **to_update) return updated
Update properties of given image. Known to work for: - min_ram (in MB) - protected (bool) - visibility ('public' or 'private') CLI Example: .. code-block:: bash salt '*' glance.image_update id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_update name=f16-jeos
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/glance.py#L424-L473
[ "def image_list(id=None, profile=None, name=None): # pylint: disable=C0103\n '''\n Return a list of available images (glance image-list)\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' glance.image_list\n '''\n g_client = _auth(profile)\n ret = []\n for image in g_client.images.list():\n if id is None and name is None:\n _add_image(ret, image)\n else:\n if id is not None and id == image.id:\n _add_image(ret, image)\n return ret\n if name == image.name:\n if name in ret and CUR_VER < BORON:\n # Not really worth an exception\n return {\n 'result': False,\n 'comment':\n 'More than one image with '\n 'name \"{0}\"'.format(name)\n }\n _add_image(ret, image)\n log.debug('Returning images: %s', ret)\n return ret\n", "def _auth(profile=None, api_version=2, **connection_args):\n '''\n Set up glance credentials, returns\n `glanceclient.client.Client`. Optional parameter\n \"api_version\" defaults to 2.\n\n Only intended to be used within glance-enabled modules\n '''\n __utils__['versions.warn_until'](\n 'Neon',\n (\n 'The glance module has been deprecated and will be removed in {version}. 
'\n 'Please update to using the glanceng module'\n ),\n )\n\n if profile:\n prefix = profile + \":keystone.\"\n else:\n prefix = \"keystone.\"\n\n def get(key, default=None):\n '''\n Checks connection_args, then salt-minion config,\n falls back to specified default value.\n '''\n return connection_args.get('connection_' + key,\n __salt__['config.get'](prefix + key, default))\n\n user = get('user', 'admin')\n password = get('password', None)\n tenant = get('tenant', 'admin')\n tenant_id = get('tenant_id')\n auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0')\n insecure = get('insecure', False)\n admin_token = get('token')\n region = get('region')\n ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/')\n g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile)\n # The trailing 'v2' causes URLs like thise one:\n # http://127.0.0.1:9292/v2/v1/images\n g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl'])\n\n if admin_token and api_version != 1 and not password:\n # If we had a password we could just\n # ignore the admin-token and move on...\n raise SaltInvocationError('Only can use keystone admin token ' +\n 'with Glance API v1')\n elif password:\n # Can't use the admin-token anyway\n kwargs = {'username': user,\n 'password': password,\n 'tenant_id': tenant_id,\n 'auth_url': auth_url,\n 'endpoint_url': g_endpoint_url,\n 'region_name': region,\n 'tenant_name': tenant}\n # 'insecure' keyword not supported by all v2.0 keystone clients\n # this ensures it's only passed in when defined\n if insecure:\n kwargs['insecure'] = True\n elif api_version == 1 and admin_token:\n kwargs = {'token': admin_token,\n 'auth_url': auth_url,\n 'endpoint_url': g_endpoint_url}\n else:\n raise SaltInvocationError('No credentials to authenticate with.')\n\n if HAS_KEYSTONE:\n log.debug('Calling keystoneclient.v2_0.client.Client(%s, **%s)',\n ks_endpoint, kwargs)\n keystone = kstone.Client(**kwargs)\n kwargs['token'] = keystone.get_token(keystone.session)\n # 
This doesn't realy prevent the password to show up\n # in the minion log as keystoneclient.session is\n # logging it anyway when in debug-mode\n kwargs.pop('password')\n log.debug('Calling glanceclient.client.Client(%s, %s, **%s)',\n api_version, g_endpoint_url, kwargs)\n # may raise exc.HTTPUnauthorized, exc.HTTPNotFound\n # but we deal with those elsewhere\n return client.Client(api_version, g_endpoint_url, **kwargs)\n else:\n raise NotImplementedError(\n \"Can't retrieve a auth_token without keystone\")\n", "def image_show(id=None, name=None, profile=None): # pylint: disable=C0103\n '''\n Return details about a specific image (glance image-show)\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' glance.image_show\n '''\n g_client = _auth(profile)\n ret = {}\n if name:\n for image in g_client.images.list():\n if image.name == name:\n id = image.id # pylint: disable=C0103\n continue\n if not id:\n return {\n 'result': False,\n 'comment':\n 'Unable to resolve image ID '\n 'for name \\'{0}\\''.format(name)\n }\n try:\n image = g_client.images.get(id)\n except exc.HTTPNotFound:\n return {\n 'result': False,\n 'comment': 'No image with ID {0}'.format(id)\n }\n log.debug(\n 'Properties of image %s:\\n%s',\n image.name, pprint.PrettyPrinter(indent=4).pformat(image)\n )\n\n schema = image_schema(profile=profile)\n if len(schema.keys()) == 1:\n schema = schema['image']\n for key in schema:\n if key in image:\n ret[key] = image[key]\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Module for handling openstack glance calls. :optdepends: - glanceclient Python adapter :configuration: This module is not usable until the following are specified either in a pillar or in the minion's config file:: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.insecure: False #(optional) keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' If configuration for multiple openstack accounts is required, they can be set up as different configuration profiles: For example:: openstack1: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' openstack2: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.2:5000/v2.0/' With this configuration in place, any of the glance functions can make use of a configuration profile by declaring it explicitly. For example:: salt '*' glance.image_list profile=openstack1 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import re # Import salt libs from salt.exceptions import ( SaltInvocationError ) from salt.version import ( __version__, SaltStackVersion ) from salt.ext import six # is there not SaltStackVersion.current() to get # the version of the salt running this code?? 
_version_ary = __version__.split('.') CUR_VER = SaltStackVersion(_version_ary[0], _version_ary[1]) BORON = SaltStackVersion.from_name('Boron') # pylint: disable=import-error HAS_GLANCE = False try: from glanceclient import client from glanceclient import exc HAS_GLANCE = True except ImportError: pass # Workaround, as the Glance API v2 requires you to # already have a keystone session token HAS_KEYSTONE = False try: from keystoneclient.v2_0 import client as kstone #import keystoneclient.apiclient.exceptions as kstone_exc HAS_KEYSTONE = True except ImportError: pass import logging logging.basicConfig(level=logging.DEBUG) log = logging.getLogger(__name__) import pprint def __virtual__(): ''' Only load this module if glance is installed on this minion. ''' if HAS_GLANCE: return 'glance' return (False, 'The glance execution module cannot be loaded: the glanceclient python library is not available.') __opts__ = {} def _auth(profile=None, api_version=2, **connection_args): ''' Set up glance credentials, returns `glanceclient.client.Client`. Optional parameter "api_version" defaults to 2. Only intended to be used within glance-enabled modules ''' __utils__['versions.warn_until']( 'Neon', ( 'The glance module has been deprecated and will be removed in {version}. ' 'Please update to using the glanceng module' ), ) if profile: prefix = profile + ":keystone." else: prefix = "keystone." def get(key, default=None): ''' Checks connection_args, then salt-minion config, falls back to specified default value. 
''' return connection_args.get('connection_' + key, __salt__['config.get'](prefix + key, default)) user = get('user', 'admin') password = get('password', None) tenant = get('tenant', 'admin') tenant_id = get('tenant_id') auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0') insecure = get('insecure', False) admin_token = get('token') region = get('region') ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/') g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile) # The trailing 'v2' causes URLs like thise one: # http://127.0.0.1:9292/v2/v1/images g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl']) if admin_token and api_version != 1 and not password: # If we had a password we could just # ignore the admin-token and move on... raise SaltInvocationError('Only can use keystone admin token ' + 'with Glance API v1') elif password: # Can't use the admin-token anyway kwargs = {'username': user, 'password': password, 'tenant_id': tenant_id, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url, 'region_name': region, 'tenant_name': tenant} # 'insecure' keyword not supported by all v2.0 keystone clients # this ensures it's only passed in when defined if insecure: kwargs['insecure'] = True elif api_version == 1 and admin_token: kwargs = {'token': admin_token, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url} else: raise SaltInvocationError('No credentials to authenticate with.') if HAS_KEYSTONE: log.debug('Calling keystoneclient.v2_0.client.Client(%s, **%s)', ks_endpoint, kwargs) keystone = kstone.Client(**kwargs) kwargs['token'] = keystone.get_token(keystone.session) # This doesn't realy prevent the password to show up # in the minion log as keystoneclient.session is # logging it anyway when in debug-mode kwargs.pop('password') log.debug('Calling glanceclient.client.Client(%s, %s, **%s)', api_version, g_endpoint_url, kwargs) # may raise exc.HTTPUnauthorized, exc.HTTPNotFound # but we deal with those elsewhere return 
client.Client(api_version, g_endpoint_url, **kwargs) else: raise NotImplementedError( "Can't retrieve a auth_token without keystone") def _add_image(collection, image): ''' Add image to given dictionary ''' image_prep = { 'id': image.id, 'name': image.name, 'created_at': image.created_at, 'file': image.file, 'min_disk': image.min_disk, 'min_ram': image.min_ram, 'owner': image.owner, 'protected': image.protected, 'status': image.status, 'tags': image.tags, 'updated_at': image.updated_at, 'visibility': image.visibility, } # Those cause AttributeErrors in Icehouse' glanceclient for attr in ['container_format', 'disk_format', 'size']: if attr in image: image_prep[attr] = image[attr] if type(collection) is dict: collection[image.name] = image_prep elif type(collection) is list: collection.append(image_prep) else: msg = '"collection" is {0}'.format(type(collection)) +\ 'instead of dict or list.' log.error(msg) raise TypeError(msg) return collection def image_create(name, location=None, profile=None, visibility=None, container_format='bare', disk_format='raw', protected=None,): ''' Create an image (glance image-create) CLI Example, old format: .. code-block:: bash salt '*' glance.image_create name=f16-jeos \\ disk_format=qcow2 container_format=ovf CLI Example, new format resembling Glance API v2: .. code-block:: bash salt '*' glance.image_create name=f16-jeos visibility=public \\ disk_format=qcow2 container_format=ovf The parameter 'visibility' defaults to 'public' if not specified. 
''' kwargs = {} # valid options for "visibility": v_list = ['public', 'private'] # valid options for "container_format": cf_list = ['ami', 'ari', 'aki', 'bare', 'ovf'] # valid options for "disk_format": df_list = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'] kwargs['copy_from'] = location if visibility is not None: if visibility not in v_list: raise SaltInvocationError('"visibility" needs to be one ' + 'of the following: {0}'.format(', '.join(v_list))) elif visibility == 'public': kwargs['is_public'] = True else: kwargs['is_public'] = False else: kwargs['is_public'] = True if container_format not in cf_list: raise SaltInvocationError('"container_format" needs to be ' + 'one of the following: {0}'.format(', '.join(cf_list))) else: kwargs['container_format'] = container_format if disk_format not in df_list: raise SaltInvocationError('"disk_format" needs to be one ' + 'of the following: {0}'.format(', '.join(df_list))) else: kwargs['disk_format'] = disk_format if protected is not None: kwargs['protected'] = protected # Icehouse's glanceclient doesn't have add_location() and # glanceclient.v2 doesn't implement Client.images.create() # in a usable fashion. Thus we have to use v1 for now. g_client = _auth(profile, api_version=1) image = g_client.images.create(name=name, **kwargs) return image_show(image.id, profile=profile) def image_delete(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Delete an image (glance image-delete) CLI Examples: .. 
code-block:: bash salt '*' glance.image_delete c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete name=f16-jeos ''' g_client = _auth(profile) image = {'id': False, 'name': None} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image id ' 'for name {0}'.format(name) } elif not name: name = image['name'] try: g_client.images.delete(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } except exc.HTTPForbidden as forbidden: log.error(six.text_type(forbidden)) return { 'result': False, 'comment': six.text_type(forbidden) } return { 'result': True, 'comment': 'Deleted image \'{0}\' ({1}).'.format(name, id), } def image_show(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Return details about a specific image (glance image-show) CLI Example: .. code-block:: bash salt '*' glance.image_show ''' g_client = _auth(profile) ret = {} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image ID ' 'for name \'{0}\''.format(name) } try: image = g_client.images.get(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } log.debug( 'Properties of image %s:\n%s', image.name, pprint.PrettyPrinter(indent=4).pformat(image) ) schema = image_schema(profile=profile) if len(schema.keys()) == 1: schema = schema['image'] for key in schema: if key in image: ret[key] = image[key] return ret def image_list(id=None, profile=None, name=None): # pylint: disable=C0103 ''' Return a list of available images (glance image-list) CLI Example: .. 
code-block:: bash salt '*' glance.image_list ''' g_client = _auth(profile) ret = [] for image in g_client.images.list(): if id is None and name is None: _add_image(ret, image) else: if id is not None and id == image.id: _add_image(ret, image) return ret if name == image.name: if name in ret and CUR_VER < BORON: # Not really worth an exception return { 'result': False, 'comment': 'More than one image with ' 'name "{0}"'.format(name) } _add_image(ret, image) log.debug('Returning images: %s', ret) return ret def image_schema(profile=None): ''' Returns names and descriptions of the schema "image"'s properties for this profile's instance of glance CLI Example: .. code-block:: bash salt '*' glance.image_schema ''' return schema_get('image', profile) def schema_get(name, profile=None): ''' Known valid names of schemas are: - image - images - member - members CLI Example: .. code-block:: bash salt '*' glance.schema_get name=f16-jeos ''' g_client = _auth(profile) schema_props = {} for prop in g_client.schemas.get(name).properties: schema_props[prop.name] = prop.description log.debug( 'Properties of schema %s:\n%s', name, pprint.PrettyPrinter(indent=4).pformat(schema_props) ) return {name: schema_props} def _item_list(profile=None): ''' Template for writing list functions Return a list of available items (glance items-list) CLI Example: .. code-block:: bash salt '*' glance.item_list ''' g_client = _auth(profile) ret = [] for item in g_client.items.list(): ret.append(item.__dict__) #ret[item.name] = { # 'name': item.name, # } return ret # The following is a list of functions that need to be incorporated in the # glance module. This list should be updated as functions are added. # image-download Download a specific image. # member-create Share a specific image with a tenant. # member-delete Remove a shared image from a tenant. # member-list Describe sharing permissions by image or tenant.
saltstack/salt
salt/modules/glance.py
schema_get
python
def schema_get(name, profile=None): ''' Known valid names of schemas are: - image - images - member - members CLI Example: .. code-block:: bash salt '*' glance.schema_get name=f16-jeos ''' g_client = _auth(profile) schema_props = {} for prop in g_client.schemas.get(name).properties: schema_props[prop.name] = prop.description log.debug( 'Properties of schema %s:\n%s', name, pprint.PrettyPrinter(indent=4).pformat(schema_props) ) return {name: schema_props}
Known valid names of schemas are: - image - images - member - members CLI Example: .. code-block:: bash salt '*' glance.schema_get name=f16-jeos
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/glance.py#L476-L498
[ "def _auth(profile=None, api_version=2, **connection_args):\n '''\n Set up glance credentials, returns\n `glanceclient.client.Client`. Optional parameter\n \"api_version\" defaults to 2.\n\n Only intended to be used within glance-enabled modules\n '''\n __utils__['versions.warn_until'](\n 'Neon',\n (\n 'The glance module has been deprecated and will be removed in {version}. '\n 'Please update to using the glanceng module'\n ),\n )\n\n if profile:\n prefix = profile + \":keystone.\"\n else:\n prefix = \"keystone.\"\n\n def get(key, default=None):\n '''\n Checks connection_args, then salt-minion config,\n falls back to specified default value.\n '''\n return connection_args.get('connection_' + key,\n __salt__['config.get'](prefix + key, default))\n\n user = get('user', 'admin')\n password = get('password', None)\n tenant = get('tenant', 'admin')\n tenant_id = get('tenant_id')\n auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0')\n insecure = get('insecure', False)\n admin_token = get('token')\n region = get('region')\n ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/')\n g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile)\n # The trailing 'v2' causes URLs like thise one:\n # http://127.0.0.1:9292/v2/v1/images\n g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl'])\n\n if admin_token and api_version != 1 and not password:\n # If we had a password we could just\n # ignore the admin-token and move on...\n raise SaltInvocationError('Only can use keystone admin token ' +\n 'with Glance API v1')\n elif password:\n # Can't use the admin-token anyway\n kwargs = {'username': user,\n 'password': password,\n 'tenant_id': tenant_id,\n 'auth_url': auth_url,\n 'endpoint_url': g_endpoint_url,\n 'region_name': region,\n 'tenant_name': tenant}\n # 'insecure' keyword not supported by all v2.0 keystone clients\n # this ensures it's only passed in when defined\n if insecure:\n kwargs['insecure'] = True\n elif api_version == 1 and admin_token:\n 
kwargs = {'token': admin_token,\n 'auth_url': auth_url,\n 'endpoint_url': g_endpoint_url}\n else:\n raise SaltInvocationError('No credentials to authenticate with.')\n\n if HAS_KEYSTONE:\n log.debug('Calling keystoneclient.v2_0.client.Client(%s, **%s)',\n ks_endpoint, kwargs)\n keystone = kstone.Client(**kwargs)\n kwargs['token'] = keystone.get_token(keystone.session)\n # This doesn't realy prevent the password to show up\n # in the minion log as keystoneclient.session is\n # logging it anyway when in debug-mode\n kwargs.pop('password')\n log.debug('Calling glanceclient.client.Client(%s, %s, **%s)',\n api_version, g_endpoint_url, kwargs)\n # may raise exc.HTTPUnauthorized, exc.HTTPNotFound\n # but we deal with those elsewhere\n return client.Client(api_version, g_endpoint_url, **kwargs)\n else:\n raise NotImplementedError(\n \"Can't retrieve a auth_token without keystone\")\n" ]
# -*- coding: utf-8 -*- ''' Module for handling openstack glance calls. :optdepends: - glanceclient Python adapter :configuration: This module is not usable until the following are specified either in a pillar or in the minion's config file:: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.insecure: False #(optional) keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' If configuration for multiple openstack accounts is required, they can be set up as different configuration profiles: For example:: openstack1: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' openstack2: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.2:5000/v2.0/' With this configuration in place, any of the glance functions can make use of a configuration profile by declaring it explicitly. For example:: salt '*' glance.image_list profile=openstack1 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import re # Import salt libs from salt.exceptions import ( SaltInvocationError ) from salt.version import ( __version__, SaltStackVersion ) from salt.ext import six # is there not SaltStackVersion.current() to get # the version of the salt running this code?? 
_version_ary = __version__.split('.') CUR_VER = SaltStackVersion(_version_ary[0], _version_ary[1]) BORON = SaltStackVersion.from_name('Boron') # pylint: disable=import-error HAS_GLANCE = False try: from glanceclient import client from glanceclient import exc HAS_GLANCE = True except ImportError: pass # Workaround, as the Glance API v2 requires you to # already have a keystone session token HAS_KEYSTONE = False try: from keystoneclient.v2_0 import client as kstone #import keystoneclient.apiclient.exceptions as kstone_exc HAS_KEYSTONE = True except ImportError: pass import logging logging.basicConfig(level=logging.DEBUG) log = logging.getLogger(__name__) import pprint def __virtual__(): ''' Only load this module if glance is installed on this minion. ''' if HAS_GLANCE: return 'glance' return (False, 'The glance execution module cannot be loaded: the glanceclient python library is not available.') __opts__ = {} def _auth(profile=None, api_version=2, **connection_args): ''' Set up glance credentials, returns `glanceclient.client.Client`. Optional parameter "api_version" defaults to 2. Only intended to be used within glance-enabled modules ''' __utils__['versions.warn_until']( 'Neon', ( 'The glance module has been deprecated and will be removed in {version}. ' 'Please update to using the glanceng module' ), ) if profile: prefix = profile + ":keystone." else: prefix = "keystone." def get(key, default=None): ''' Checks connection_args, then salt-minion config, falls back to specified default value. 
''' return connection_args.get('connection_' + key, __salt__['config.get'](prefix + key, default)) user = get('user', 'admin') password = get('password', None) tenant = get('tenant', 'admin') tenant_id = get('tenant_id') auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0') insecure = get('insecure', False) admin_token = get('token') region = get('region') ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/') g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile) # The trailing 'v2' causes URLs like thise one: # http://127.0.0.1:9292/v2/v1/images g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl']) if admin_token and api_version != 1 and not password: # If we had a password we could just # ignore the admin-token and move on... raise SaltInvocationError('Only can use keystone admin token ' + 'with Glance API v1') elif password: # Can't use the admin-token anyway kwargs = {'username': user, 'password': password, 'tenant_id': tenant_id, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url, 'region_name': region, 'tenant_name': tenant} # 'insecure' keyword not supported by all v2.0 keystone clients # this ensures it's only passed in when defined if insecure: kwargs['insecure'] = True elif api_version == 1 and admin_token: kwargs = {'token': admin_token, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url} else: raise SaltInvocationError('No credentials to authenticate with.') if HAS_KEYSTONE: log.debug('Calling keystoneclient.v2_0.client.Client(%s, **%s)', ks_endpoint, kwargs) keystone = kstone.Client(**kwargs) kwargs['token'] = keystone.get_token(keystone.session) # This doesn't realy prevent the password to show up # in the minion log as keystoneclient.session is # logging it anyway when in debug-mode kwargs.pop('password') log.debug('Calling glanceclient.client.Client(%s, %s, **%s)', api_version, g_endpoint_url, kwargs) # may raise exc.HTTPUnauthorized, exc.HTTPNotFound # but we deal with those elsewhere return 
client.Client(api_version, g_endpoint_url, **kwargs) else: raise NotImplementedError( "Can't retrieve a auth_token without keystone") def _add_image(collection, image): ''' Add image to given dictionary ''' image_prep = { 'id': image.id, 'name': image.name, 'created_at': image.created_at, 'file': image.file, 'min_disk': image.min_disk, 'min_ram': image.min_ram, 'owner': image.owner, 'protected': image.protected, 'status': image.status, 'tags': image.tags, 'updated_at': image.updated_at, 'visibility': image.visibility, } # Those cause AttributeErrors in Icehouse' glanceclient for attr in ['container_format', 'disk_format', 'size']: if attr in image: image_prep[attr] = image[attr] if type(collection) is dict: collection[image.name] = image_prep elif type(collection) is list: collection.append(image_prep) else: msg = '"collection" is {0}'.format(type(collection)) +\ 'instead of dict or list.' log.error(msg) raise TypeError(msg) return collection def image_create(name, location=None, profile=None, visibility=None, container_format='bare', disk_format='raw', protected=None,): ''' Create an image (glance image-create) CLI Example, old format: .. code-block:: bash salt '*' glance.image_create name=f16-jeos \\ disk_format=qcow2 container_format=ovf CLI Example, new format resembling Glance API v2: .. code-block:: bash salt '*' glance.image_create name=f16-jeos visibility=public \\ disk_format=qcow2 container_format=ovf The parameter 'visibility' defaults to 'public' if not specified. 
''' kwargs = {} # valid options for "visibility": v_list = ['public', 'private'] # valid options for "container_format": cf_list = ['ami', 'ari', 'aki', 'bare', 'ovf'] # valid options for "disk_format": df_list = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'] kwargs['copy_from'] = location if visibility is not None: if visibility not in v_list: raise SaltInvocationError('"visibility" needs to be one ' + 'of the following: {0}'.format(', '.join(v_list))) elif visibility == 'public': kwargs['is_public'] = True else: kwargs['is_public'] = False else: kwargs['is_public'] = True if container_format not in cf_list: raise SaltInvocationError('"container_format" needs to be ' + 'one of the following: {0}'.format(', '.join(cf_list))) else: kwargs['container_format'] = container_format if disk_format not in df_list: raise SaltInvocationError('"disk_format" needs to be one ' + 'of the following: {0}'.format(', '.join(df_list))) else: kwargs['disk_format'] = disk_format if protected is not None: kwargs['protected'] = protected # Icehouse's glanceclient doesn't have add_location() and # glanceclient.v2 doesn't implement Client.images.create() # in a usable fashion. Thus we have to use v1 for now. g_client = _auth(profile, api_version=1) image = g_client.images.create(name=name, **kwargs) return image_show(image.id, profile=profile) def image_delete(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Delete an image (glance image-delete) CLI Examples: .. 
code-block:: bash salt '*' glance.image_delete c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete name=f16-jeos ''' g_client = _auth(profile) image = {'id': False, 'name': None} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image id ' 'for name {0}'.format(name) } elif not name: name = image['name'] try: g_client.images.delete(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } except exc.HTTPForbidden as forbidden: log.error(six.text_type(forbidden)) return { 'result': False, 'comment': six.text_type(forbidden) } return { 'result': True, 'comment': 'Deleted image \'{0}\' ({1}).'.format(name, id), } def image_show(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Return details about a specific image (glance image-show) CLI Example: .. code-block:: bash salt '*' glance.image_show ''' g_client = _auth(profile) ret = {} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image ID ' 'for name \'{0}\''.format(name) } try: image = g_client.images.get(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } log.debug( 'Properties of image %s:\n%s', image.name, pprint.PrettyPrinter(indent=4).pformat(image) ) schema = image_schema(profile=profile) if len(schema.keys()) == 1: schema = schema['image'] for key in schema: if key in image: ret[key] = image[key] return ret def image_list(id=None, profile=None, name=None): # pylint: disable=C0103 ''' Return a list of available images (glance image-list) CLI Example: .. 
code-block:: bash salt '*' glance.image_list ''' g_client = _auth(profile) ret = [] for image in g_client.images.list(): if id is None and name is None: _add_image(ret, image) else: if id is not None and id == image.id: _add_image(ret, image) return ret if name == image.name: if name in ret and CUR_VER < BORON: # Not really worth an exception return { 'result': False, 'comment': 'More than one image with ' 'name "{0}"'.format(name) } _add_image(ret, image) log.debug('Returning images: %s', ret) return ret def image_schema(profile=None): ''' Returns names and descriptions of the schema "image"'s properties for this profile's instance of glance CLI Example: .. code-block:: bash salt '*' glance.image_schema ''' return schema_get('image', profile) def image_update(id=None, name=None, profile=None, **kwargs): # pylint: disable=C0103 ''' Update properties of given image. Known to work for: - min_ram (in MB) - protected (bool) - visibility ('public' or 'private') CLI Example: .. code-block:: bash salt '*' glance.image_update id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_update name=f16-jeos ''' if id: image = image_show(id=id, profile=profile) if 'result' in image and not image['result']: return image elif len(image) == 1: image = image.values()[0] elif name: img_list = image_list(name=name, profile=profile) if img_list is dict and 'result' in img_list: return img_list elif not img_list: return { 'result': False, 'comment': 'No image with name \'{0}\' ' 'found.'.format(name) } elif len(img_list) == 1: try: image = img_list[0] except KeyError: image = img_list[name] else: raise SaltInvocationError log.debug('Found image:\n%s', image) to_update = {} for key, value in kwargs.items(): if key.startswith('_'): continue if key not in image or image[key] != value: log.debug('add <%s=%s> to to_update', key, value) to_update[key] = value g_client = _auth(profile) updated = g_client.images.update(image['id'], **to_update) return updated def _item_list(profile=None): 
''' Template for writing list functions Return a list of available items (glance items-list) CLI Example: .. code-block:: bash salt '*' glance.item_list ''' g_client = _auth(profile) ret = [] for item in g_client.items.list(): ret.append(item.__dict__) #ret[item.name] = { # 'name': item.name, # } return ret # The following is a list of functions that need to be incorporated in the # glance module. This list should be updated as functions are added. # image-download Download a specific image. # member-create Share a specific image with a tenant. # member-delete Remove a shared image from a tenant. # member-list Describe sharing permissions by image or tenant.
saltstack/salt
salt/modules/glance.py
_item_list
python
def _item_list(profile=None): ''' Template for writing list functions Return a list of available items (glance items-list) CLI Example: .. code-block:: bash salt '*' glance.item_list ''' g_client = _auth(profile) ret = [] for item in g_client.items.list(): ret.append(item.__dict__) #ret[item.name] = { # 'name': item.name, # } return ret
Template for writing list functions Return a list of available items (glance items-list) CLI Example: .. code-block:: bash salt '*' glance.item_list
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/glance.py#L501-L519
null
# -*- coding: utf-8 -*- ''' Module for handling openstack glance calls. :optdepends: - glanceclient Python adapter :configuration: This module is not usable until the following are specified either in a pillar or in the minion's config file:: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.insecure: False #(optional) keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' If configuration for multiple openstack accounts is required, they can be set up as different configuration profiles: For example:: openstack1: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' openstack2: keystone.user: admin keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.2:5000/v2.0/' With this configuration in place, any of the glance functions can make use of a configuration profile by declaring it explicitly. For example:: salt '*' glance.image_list profile=openstack1 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import re # Import salt libs from salt.exceptions import ( SaltInvocationError ) from salt.version import ( __version__, SaltStackVersion ) from salt.ext import six # is there not SaltStackVersion.current() to get # the version of the salt running this code?? 
_version_ary = __version__.split('.') CUR_VER = SaltStackVersion(_version_ary[0], _version_ary[1]) BORON = SaltStackVersion.from_name('Boron') # pylint: disable=import-error HAS_GLANCE = False try: from glanceclient import client from glanceclient import exc HAS_GLANCE = True except ImportError: pass # Workaround, as the Glance API v2 requires you to # already have a keystone session token HAS_KEYSTONE = False try: from keystoneclient.v2_0 import client as kstone #import keystoneclient.apiclient.exceptions as kstone_exc HAS_KEYSTONE = True except ImportError: pass import logging logging.basicConfig(level=logging.DEBUG) log = logging.getLogger(__name__) import pprint def __virtual__(): ''' Only load this module if glance is installed on this minion. ''' if HAS_GLANCE: return 'glance' return (False, 'The glance execution module cannot be loaded: the glanceclient python library is not available.') __opts__ = {} def _auth(profile=None, api_version=2, **connection_args): ''' Set up glance credentials, returns `glanceclient.client.Client`. Optional parameter "api_version" defaults to 2. Only intended to be used within glance-enabled modules ''' __utils__['versions.warn_until']( 'Neon', ( 'The glance module has been deprecated and will be removed in {version}. ' 'Please update to using the glanceng module' ), ) if profile: prefix = profile + ":keystone." else: prefix = "keystone." def get(key, default=None): ''' Checks connection_args, then salt-minion config, falls back to specified default value. 
''' return connection_args.get('connection_' + key, __salt__['config.get'](prefix + key, default)) user = get('user', 'admin') password = get('password', None) tenant = get('tenant', 'admin') tenant_id = get('tenant_id') auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0') insecure = get('insecure', False) admin_token = get('token') region = get('region') ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/') g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile) # The trailing 'v2' causes URLs like thise one: # http://127.0.0.1:9292/v2/v1/images g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl']) if admin_token and api_version != 1 and not password: # If we had a password we could just # ignore the admin-token and move on... raise SaltInvocationError('Only can use keystone admin token ' + 'with Glance API v1') elif password: # Can't use the admin-token anyway kwargs = {'username': user, 'password': password, 'tenant_id': tenant_id, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url, 'region_name': region, 'tenant_name': tenant} # 'insecure' keyword not supported by all v2.0 keystone clients # this ensures it's only passed in when defined if insecure: kwargs['insecure'] = True elif api_version == 1 and admin_token: kwargs = {'token': admin_token, 'auth_url': auth_url, 'endpoint_url': g_endpoint_url} else: raise SaltInvocationError('No credentials to authenticate with.') if HAS_KEYSTONE: log.debug('Calling keystoneclient.v2_0.client.Client(%s, **%s)', ks_endpoint, kwargs) keystone = kstone.Client(**kwargs) kwargs['token'] = keystone.get_token(keystone.session) # This doesn't realy prevent the password to show up # in the minion log as keystoneclient.session is # logging it anyway when in debug-mode kwargs.pop('password') log.debug('Calling glanceclient.client.Client(%s, %s, **%s)', api_version, g_endpoint_url, kwargs) # may raise exc.HTTPUnauthorized, exc.HTTPNotFound # but we deal with those elsewhere return 
client.Client(api_version, g_endpoint_url, **kwargs) else: raise NotImplementedError( "Can't retrieve a auth_token without keystone") def _add_image(collection, image): ''' Add image to given dictionary ''' image_prep = { 'id': image.id, 'name': image.name, 'created_at': image.created_at, 'file': image.file, 'min_disk': image.min_disk, 'min_ram': image.min_ram, 'owner': image.owner, 'protected': image.protected, 'status': image.status, 'tags': image.tags, 'updated_at': image.updated_at, 'visibility': image.visibility, } # Those cause AttributeErrors in Icehouse' glanceclient for attr in ['container_format', 'disk_format', 'size']: if attr in image: image_prep[attr] = image[attr] if type(collection) is dict: collection[image.name] = image_prep elif type(collection) is list: collection.append(image_prep) else: msg = '"collection" is {0}'.format(type(collection)) +\ 'instead of dict or list.' log.error(msg) raise TypeError(msg) return collection def image_create(name, location=None, profile=None, visibility=None, container_format='bare', disk_format='raw', protected=None,): ''' Create an image (glance image-create) CLI Example, old format: .. code-block:: bash salt '*' glance.image_create name=f16-jeos \\ disk_format=qcow2 container_format=ovf CLI Example, new format resembling Glance API v2: .. code-block:: bash salt '*' glance.image_create name=f16-jeos visibility=public \\ disk_format=qcow2 container_format=ovf The parameter 'visibility' defaults to 'public' if not specified. 
''' kwargs = {} # valid options for "visibility": v_list = ['public', 'private'] # valid options for "container_format": cf_list = ['ami', 'ari', 'aki', 'bare', 'ovf'] # valid options for "disk_format": df_list = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'] kwargs['copy_from'] = location if visibility is not None: if visibility not in v_list: raise SaltInvocationError('"visibility" needs to be one ' + 'of the following: {0}'.format(', '.join(v_list))) elif visibility == 'public': kwargs['is_public'] = True else: kwargs['is_public'] = False else: kwargs['is_public'] = True if container_format not in cf_list: raise SaltInvocationError('"container_format" needs to be ' + 'one of the following: {0}'.format(', '.join(cf_list))) else: kwargs['container_format'] = container_format if disk_format not in df_list: raise SaltInvocationError('"disk_format" needs to be one ' + 'of the following: {0}'.format(', '.join(df_list))) else: kwargs['disk_format'] = disk_format if protected is not None: kwargs['protected'] = protected # Icehouse's glanceclient doesn't have add_location() and # glanceclient.v2 doesn't implement Client.images.create() # in a usable fashion. Thus we have to use v1 for now. g_client = _auth(profile, api_version=1) image = g_client.images.create(name=name, **kwargs) return image_show(image.id, profile=profile) def image_delete(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Delete an image (glance image-delete) CLI Examples: .. 
code-block:: bash salt '*' glance.image_delete c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete name=f16-jeos ''' g_client = _auth(profile) image = {'id': False, 'name': None} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image id ' 'for name {0}'.format(name) } elif not name: name = image['name'] try: g_client.images.delete(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } except exc.HTTPForbidden as forbidden: log.error(six.text_type(forbidden)) return { 'result': False, 'comment': six.text_type(forbidden) } return { 'result': True, 'comment': 'Deleted image \'{0}\' ({1}).'.format(name, id), } def image_show(id=None, name=None, profile=None): # pylint: disable=C0103 ''' Return details about a specific image (glance image-show) CLI Example: .. code-block:: bash salt '*' glance.image_show ''' g_client = _auth(profile) ret = {} if name: for image in g_client.images.list(): if image.name == name: id = image.id # pylint: disable=C0103 continue if not id: return { 'result': False, 'comment': 'Unable to resolve image ID ' 'for name \'{0}\''.format(name) } try: image = g_client.images.get(id) except exc.HTTPNotFound: return { 'result': False, 'comment': 'No image with ID {0}'.format(id) } log.debug( 'Properties of image %s:\n%s', image.name, pprint.PrettyPrinter(indent=4).pformat(image) ) schema = image_schema(profile=profile) if len(schema.keys()) == 1: schema = schema['image'] for key in schema: if key in image: ret[key] = image[key] return ret def image_list(id=None, profile=None, name=None): # pylint: disable=C0103 ''' Return a list of available images (glance image-list) CLI Example: .. 
code-block:: bash salt '*' glance.image_list ''' g_client = _auth(profile) ret = [] for image in g_client.images.list(): if id is None and name is None: _add_image(ret, image) else: if id is not None and id == image.id: _add_image(ret, image) return ret if name == image.name: if name in ret and CUR_VER < BORON: # Not really worth an exception return { 'result': False, 'comment': 'More than one image with ' 'name "{0}"'.format(name) } _add_image(ret, image) log.debug('Returning images: %s', ret) return ret def image_schema(profile=None): ''' Returns names and descriptions of the schema "image"'s properties for this profile's instance of glance CLI Example: .. code-block:: bash salt '*' glance.image_schema ''' return schema_get('image', profile) def image_update(id=None, name=None, profile=None, **kwargs): # pylint: disable=C0103 ''' Update properties of given image. Known to work for: - min_ram (in MB) - protected (bool) - visibility ('public' or 'private') CLI Example: .. code-block:: bash salt '*' glance.image_update id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_update name=f16-jeos ''' if id: image = image_show(id=id, profile=profile) if 'result' in image and not image['result']: return image elif len(image) == 1: image = image.values()[0] elif name: img_list = image_list(name=name, profile=profile) if img_list is dict and 'result' in img_list: return img_list elif not img_list: return { 'result': False, 'comment': 'No image with name \'{0}\' ' 'found.'.format(name) } elif len(img_list) == 1: try: image = img_list[0] except KeyError: image = img_list[name] else: raise SaltInvocationError log.debug('Found image:\n%s', image) to_update = {} for key, value in kwargs.items(): if key.startswith('_'): continue if key not in image or image[key] != value: log.debug('add <%s=%s> to to_update', key, value) to_update[key] = value g_client = _auth(profile) updated = g_client.images.update(image['id'], **to_update) return updated def schema_get(name, 
profile=None): ''' Known valid names of schemas are: - image - images - member - members CLI Example: .. code-block:: bash salt '*' glance.schema_get name=f16-jeos ''' g_client = _auth(profile) schema_props = {} for prop in g_client.schemas.get(name).properties: schema_props[prop.name] = prop.description log.debug( 'Properties of schema %s:\n%s', name, pprint.PrettyPrinter(indent=4).pformat(schema_props) ) return {name: schema_props} # The following is a list of functions that need to be incorporated in the # glance module. This list should be updated as functions are added. # image-download Download a specific image. # member-create Share a specific image with a tenant. # member-delete Remove a shared image from a tenant. # member-list Describe sharing permissions by image or tenant.
saltstack/salt
salt/states/loop.py
until
python
def until(name, m_args=None, m_kwargs=None, condition=None, period=0, timeout=604800): ''' Loop over an execution module until a condition is met. name The name of the execution module m_args The execution module's positional arguments m_kwargs The execution module's keyword arguments condition The condition which must be met for the loop to break. This should contain ``m_ret`` which is the return from the execution module. period The number of seconds to wait between executions timeout The timeout in seconds ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if name not in __salt__: ret['comment'] = 'Cannot find module {0}'.format(name) return ret if condition is None: ret['comment'] = 'An exit condition must be specified' return ret if not isinstance(period, int): ret['comment'] = 'Period must be specified as an integer in seconds' return ret if not isinstance(timeout, int): ret['comment'] = 'Timeout must be specified as an integer in seconds' return ret if __opts__['test']: ret['comment'] = 'The execution module {0} will be run'.format(name) ret['result'] = None return ret if not m_args: m_args = [] if not m_kwargs: m_kwargs = {} def timed_out(): if time.time() >= timeout: return True return False timeout = time.time() + timeout while not timed_out(): m_ret = __salt__[name](*m_args, **m_kwargs) if eval(condition): # pylint: disable=W0123 ret['result'] = True ret['comment'] = 'Condition {0} was met'.format(condition) return ret time.sleep(period) ret['comment'] = 'Timed out while waiting for condition {0}'.format(condition) return ret
Loop over an execution module until a condition is met. name The name of the execution module m_args The execution module's positional arguments m_kwargs The execution module's keyword arguments condition The condition which must be met for the loop to break. This should contain ``m_ret`` which is the return from the execution module. period The number of seconds to wait between executions timeout The timeout in seconds
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/loop.py#L47-L118
[ "def timed_out():\n if time.time() >= timeout:\n return True\n return False\n" ]
# -*- coding: utf-8 -*- ''' Loop state Allows for looping over execution modules. .. versionadded:: 2017.7.0 .. code-block:: yaml wait_for_service_to_be_healthy: loop.until: - name: boto_elb.get_instance_health - condition: m_ret[0]['state'] == 'InService' - period: 5 - timeout: 20 - m_args: - {{ elb }} - m_kwargs: keyid: {{ access_key }} key: {{ secret_key }} instances: "{{ instance }}" .. warning:: This state allows arbitrary python code to be executed through the condition parameter which is literally evaluated within the state. Please use caution. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time # Initialize logging log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'loop' def __virtual__(): return True
saltstack/salt
salt/cloud/clouds/proxmox.py
_authenticate
python
def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken'])
Retrieve CSRF and API tickets for the Proxmox API
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L116-L146
null
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. 
''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. ''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. 
''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. 
code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' 
) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): 
return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 
'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # 
net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list 
postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == 
search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # 
LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. 
Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') 
return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
query
python
def query(conn_type, option, post_data=None):
    '''
    Execute an HTTP request against the Proxmox API and return the
    ``data`` member of its JSON response.

    :param conn_type: HTTP verb to use: ``get``, ``post``, ``put`` or ``delete``
    :param option: API path relative to ``/api2/json/``
                   (e.g. ``cluster/resources``)
    :param post_data: optional request body for mutating verbs
    :raises SaltCloudExecutionFailure: for an unsupported ``conn_type`` or
        when the API response carries no ``data`` key
    '''
    if ticket is None or csrf is None or url is None:
        log.debug('Not authenticated yet, doing that now..')
        _authenticate()

    full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option)

    log.debug('%s: %s (%s)', conn_type, full_url, post_data)

    httpheaders = {'Accept': 'application/json',
                   'Content-Type': 'application/x-www-form-urlencoded',
                   'User-Agent': 'salt-cloud-proxmox'}

    if conn_type == 'get':
        response = requests.get(full_url, verify=verify_ssl,
                                cookies=ticket)
    elif conn_type in ('post', 'put', 'delete'):
        # Mutating requests additionally require the CSRF-prevention token.
        httpheaders['CSRFPreventionToken'] = csrf
        requester = getattr(requests, conn_type)
        response = requester(full_url, verify=verify_ssl,
                             data=post_data,
                             cookies=ticket,
                             headers=httpheaders)
    else:
        # Previously an unknown verb fell through all branches and crashed
        # with NameError on 'response' at raise_for_status() below.
        raise SaltCloudExecutionFailure(
            'Unsupported HTTP method: {0}'.format(conn_type))

    response.raise_for_status()

    try:
        returned_data = response.json()
    except ValueError:
        # Only JSON-decoding problems are handled here. The original broad
        # 'except Exception' also swallowed the SaltCloudExecutionFailure
        # raised below, silently turning API errors into a None return.
        log.error('Error in trying to process JSON')
        log.error(response)
        return None

    if 'data' not in returned_data:
        raise SaltCloudExecutionFailure
    return returned_data['data']
Execute the HTTP request to the API
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L149-L196
[ "def _authenticate():\n '''\n Retrieve CSRF and API tickets for the Proxmox API\n '''\n global url, port, ticket, csrf, verify_ssl\n url = config.get_cloud_config_value(\n 'url', get_configured_provider(), __opts__, search_global=False\n )\n port = config.get_cloud_config_value(\n 'port', get_configured_provider(), __opts__,\n default=8006, search_global=False\n )\n username = config.get_cloud_config_value(\n 'user', get_configured_provider(), __opts__, search_global=False\n ),\n passwd = config.get_cloud_config_value(\n 'password', get_configured_provider(), __opts__, search_global=False\n )\n verify_ssl = config.get_cloud_config_value(\n 'verify_ssl', get_configured_provider(), __opts__,\n default=True, search_global=False\n )\n\n connect_data = {'username': username, 'password': passwd}\n full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port)\n\n returned_data = requests.post(\n full_url, verify=verify_ssl, data=connect_data).json()\n\n ticket = {'PVEAuthCookie': returned_data['data']['ticket']}\n csrf = six.text_type(returned_data['data']['CSRFPreventionToken'])\n" ]
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. 
Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. ''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. 
code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, 
salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' 
) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' ) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): 
return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 
'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # 
net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list 
postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == 
search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # 
LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. 
Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') 
return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
_get_vm_by_name
python
def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False
Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L199-L209
[ "def get_resources_vms(call=None, resFilter=None, includeConfig=True):\n '''\n Retrieve all VMs available on this environment\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -f get_resources_vms my-proxmox-config\n '''\n\n timeoutTime = time.time() + 60\n while True:\n log.debug('Getting resource: vms.. (filter: %s)', resFilter)\n resources = query('get', 'cluster/resources')\n ret = {}\n badResource = False\n for resource in resources:\n if 'type' in resource and resource['type'] in ['openvz', 'qemu',\n 'lxc']:\n try:\n name = resource['name']\n except KeyError:\n badResource = True\n log.debug('No name in VM resource %s', repr(resource))\n break\n\n ret[name] = resource\n\n if includeConfig:\n # Requested to include the detailed configuration of a VM\n ret[name]['config'] = get_vmconfig(\n ret[name]['vmid'],\n ret[name]['node'],\n ret[name]['type']\n )\n\n if time.time() > timeoutTime:\n raise SaltCloudExecutionTimeout('FAILED to get the proxmox '\n 'resources vms')\n\n # Carry on if there wasn't a bad resource return from Proxmox\n if not badResource:\n break\n\n time.sleep(0.5)\n\n if resFilter is not None:\n log.debug('Filter given: %s, returning requested '\n 'resource: nodes', resFilter)\n return ret[resFilter]\n\n log.debug('Filter not given: %s, returning all resource: nodes', ret)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. 
''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. 
code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' 
) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): 
return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 
'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # 
net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list 
postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == 
search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # 
LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. 
Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') 
return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
_get_vm_by_id
python
def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False
Retrieve a VM based on the ID.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L212-L221
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def get_resources_vms(call=None, resFilter=None, includeConfig=True):\n '''\n Retrieve all VMs available on this environment\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -f get_resources_vms my-proxmox-config\n '''\n\n timeoutTime = time.time() + 60\n while True:\n log.debug('Getting resource: vms.. (filter: %s)', resFilter)\n resources = query('get', 'cluster/resources')\n ret = {}\n badResource = False\n for resource in resources:\n if 'type' in resource and resource['type'] in ['openvz', 'qemu',\n 'lxc']:\n try:\n name = resource['name']\n except KeyError:\n badResource = True\n log.debug('No name in VM resource %s', repr(resource))\n break\n\n ret[name] = resource\n\n if includeConfig:\n # Requested to include the detailed configuration of a VM\n ret[name]['config'] = get_vmconfig(\n ret[name]['vmid'],\n ret[name]['node'],\n ret[name]['type']\n )\n\n if time.time() > timeoutTime:\n raise SaltCloudExecutionTimeout('FAILED to get the proxmox '\n 'resources vms')\n\n # Carry on if there wasn't a bad resource return from Proxmox\n if not badResource:\n break\n\n time.sleep(0.5)\n\n if resFilter is not None:\n log.debug('Filter given: %s, returning requested '\n 'resource: nodes', resFilter)\n return ret[resFilter]\n\n log.debug('Filter not given: %s, returning all resource: nodes', ret)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. 
''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. 
code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' 
) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): 
return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 
'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # 
net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list 
postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == 
search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # 
LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. 
Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') 
return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
_check_ip_available
python
def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True
Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L232-L245
null
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. 
''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. 
code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' 
) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): 
return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 
'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # 
net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list 
postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == 
search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # 
LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. 
Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') 
return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
_parse_proxmox_upid
python
def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. ''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret
Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L248-L272
null
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. 
''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. 
code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' 
) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): 
return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 
'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # 
net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list 
postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == 
search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # 
LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. 
Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') 
return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
_lookup_proxmox_task
python
def _lookup_proxmox_task(upid):
    '''
    Look up the Proxmox cluster task matching *upid*.

    Proxmox reports long-running jobs via a UPID string; scanning the
    cluster task list for that UPID lets callers check whether the job
    has completed (the matching entry carries a ``status`` field).

    Returns the matching task dict, or ``False`` when no task matches.
    '''
    log.debug('Getting creation status for upid: %s', upid)
    task_list = query('get', 'cluster/tasks') or []

    # Collect matches side-effect-free; at most one entry shares a UPID.
    matches = [entry for entry in task_list if entry['upid'] == upid]
    if matches:
        log.debug('Found upid task: %s', matches[0])
        return matches[0]

    return False
Retrieve the (latest) logs and the status for a UPID. This can be used to verify whether a task has completed.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L275-L289
[ "def query(conn_type, option, post_data=None):\n '''\n Execute the HTTP request to the API\n '''\n if ticket is None or csrf is None or url is None:\n log.debug('Not authenticated yet, doing that now..')\n _authenticate()\n\n full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option)\n\n log.debug('%s: %s (%s)', conn_type, full_url, post_data)\n\n httpheaders = {'Accept': 'application/json',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent': 'salt-cloud-proxmox'}\n\n if conn_type == 'post':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.post(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'put':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.put(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'delete':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.delete(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'get':\n response = requests.get(full_url, verify=verify_ssl,\n cookies=ticket)\n\n response.raise_for_status()\n\n try:\n returned_data = response.json()\n if 'data' not in returned_data:\n raise SaltCloudExecutionFailure\n return returned_data['data']\n except Exception:\n log.error('Error in trying to process JSON')\n log.error(response)\n" ]
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. 
''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. ''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. 
code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. 
code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' 
) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' ) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): 
return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 
'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # 
net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list 
postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == 
search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # 
LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. 
Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') 
return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
get_resources_nodes
python
def get_resources_nodes(call=None, resFilter=None):
    '''
    Retrieve all hypervisors (nodes) available on this environment

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_resources_nodes my-proxmox-config
    '''
    log.debug('Getting resource: nodes.. (filter: %s)', resFilter)

    # Ask the cluster for every resource it knows about, then keep only
    # the entries that describe a hypervisor node, keyed by node name.
    found = query('get', 'cluster/resources')
    ret = {
        item['node']: item
        for item in found
        if item.get('type') == 'node'
    }

    if resFilter is not None:
        # A specific node was requested; return just that entry.
        log.debug('Filter given: %s, returning requested '
                  'resource: nodes', resFilter)
        return ret[resFilter]

    log.debug('Filter not given: %s, returning all resource: nodes', ret)
    return ret
Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L292-L316
[ "def query(conn_type, option, post_data=None):\n '''\n Execute the HTTP request to the API\n '''\n if ticket is None or csrf is None or url is None:\n log.debug('Not authenticated yet, doing that now..')\n _authenticate()\n\n full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option)\n\n log.debug('%s: %s (%s)', conn_type, full_url, post_data)\n\n httpheaders = {'Accept': 'application/json',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent': 'salt-cloud-proxmox'}\n\n if conn_type == 'post':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.post(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'put':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.put(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'delete':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.delete(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'get':\n response = requests.get(full_url, verify=verify_ssl,\n cookies=ticket)\n\n response.raise_for_status()\n\n try:\n returned_data = response.json()\n if 'data' not in returned_data:\n raise SaltCloudExecutionFailure\n return returned_data['data']\n except Exception:\n log.error('Error in trying to process JSON')\n log.error(response)\n" ]
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. 
''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. ''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. 
code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' 
) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): 
return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 
'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # 
net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list 
postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == 
search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # 
LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. 
Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') 
return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
get_resources_vms
python
def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret
Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L319-L372
[ "def query(conn_type, option, post_data=None):\n '''\n Execute the HTTP request to the API\n '''\n if ticket is None or csrf is None or url is None:\n log.debug('Not authenticated yet, doing that now..')\n _authenticate()\n\n full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option)\n\n log.debug('%s: %s (%s)', conn_type, full_url, post_data)\n\n httpheaders = {'Accept': 'application/json',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent': 'salt-cloud-proxmox'}\n\n if conn_type == 'post':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.post(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'put':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.put(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'delete':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.delete(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'get':\n response = requests.get(full_url, verify=verify_ssl,\n cookies=ticket)\n\n response.raise_for_status()\n\n try:\n returned_data = response.json()\n if 'data' not in returned_data:\n raise SaltCloudExecutionFailure\n return returned_data['data']\n except Exception:\n log.error('Error in trying to process JSON')\n log.error(response)\n", "def get_vmconfig(vmid, node=None, node_type='openvz'):\n '''\n Get VM configuration\n '''\n if node is None:\n # We need to figure out which node this VM is on.\n for host_name, host_details in six.iteritems(avail_locations()):\n for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)):\n if item['vmid'] == vmid:\n node = host_name\n\n # If we reached this point, we have all the information we need\n data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid))\n\n return data\n" ]
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. 
''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. ''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. 
code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' ) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): 
return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 
'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # 
net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list 
postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == 
search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # 
LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. 
Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') 
return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
script
python
def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) )
Return the script deployment object
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L375-L390
[ "def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n", "def minion_config(opts, vm_):\n '''\n Return a minion's configuration for the provided options and VM\n '''\n\n # Don't start with a copy of the default minion opts; they're not always\n # what we need. Some default options are Null, let's set a reasonable default\n minion = {\n 'master': 'salt',\n 'log_level': 'info',\n 'hash_type': 'sha256',\n }\n\n # Now, let's update it to our needs\n minion['id'] = vm_['name']\n master_finger = salt.config.get_cloud_config_value('master_finger', vm_, opts)\n if master_finger is not None:\n minion['master_finger'] = master_finger\n minion.update(\n # Get ANY defined minion settings, merging data, in the following order\n # 1. VM config\n # 2. Profile config\n # 3. 
Global configuration\n salt.config.get_cloud_config_value(\n 'minion', vm_, opts, default={}, search_global=True\n )\n )\n\n make_master = salt.config.get_cloud_config_value('make_master', vm_, opts)\n if 'master' not in minion and make_master is not True:\n raise SaltCloudConfigError(\n 'A master setting was not defined in the minion\\'s configuration.'\n )\n\n # Get ANY defined grains settings, merging data, in the following order\n # 1. VM config\n # 2. Profile config\n # 3. Global configuration\n minion.setdefault('grains', {}).update(\n salt.config.get_cloud_config_value(\n 'grains', vm_, opts, default={}, search_global=True\n )\n )\n return minion\n", "def os_script(os_, vm_=None, opts=None, minion=''):\n '''\n Return the script as a string for the specific os\n '''\n if minion:\n minion = salt_config_to_yaml(minion)\n\n if os.path.isabs(os_):\n # The user provided an absolute path to the deploy script, let's use it\n return __render_script(os_, vm_, opts, minion)\n\n if os.path.isabs('{0}.sh'.format(os_)):\n # The user provided an absolute path to the deploy script, although no\n # extension was provided. Let's use it anyway.\n return __render_script('{0}.sh'.format(os_), vm_, opts, minion)\n\n for search_path in opts['deploy_scripts_search_path']:\n if os.path.isfile(os.path.join(search_path, os_)):\n return __render_script(\n os.path.join(search_path, os_), vm_, opts, minion\n )\n\n if os.path.isfile(os.path.join(search_path, '{0}.sh'.format(os_))):\n return __render_script(\n os.path.join(search_path, '{0}.sh'.format(os_)),\n vm_, opts, minion\n )\n # No deploy script was found, return an empty string\n return ''\n", "def salt_config_to_yaml(configuration, line_break='\\n'):\n '''\n Return a salt configuration dictionary, master or minion, as a yaml dump\n '''\n return salt.utils.yaml.safe_dump(\n configuration,\n line_break=line_break,\n default_flow_style=False)\n" ]
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. 
''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. ''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. 
code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' 
) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' ) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): 
return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 
'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # 
net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list 
postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == 
search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # 
LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. 
Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') 
return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
avail_locations
python
def avail_locations(call=None):
    '''
    Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-proxmox-config
    '''
    # Salt-cloud convention: this is a --function/--list-locations entry
    # point and must refuse to run as an --action.
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    # could also use the get_resources_nodes but speed is ~the same
    # Key each hypervisor record by its node name.
    return {node['node']: node for node in query('get', 'nodes')}
Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L393-L417
[ "def query(conn_type, option, post_data=None):\n '''\n Execute the HTTP request to the API\n '''\n if ticket is None or csrf is None or url is None:\n log.debug('Not authenticated yet, doing that now..')\n _authenticate()\n\n full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option)\n\n log.debug('%s: %s (%s)', conn_type, full_url, post_data)\n\n httpheaders = {'Accept': 'application/json',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent': 'salt-cloud-proxmox'}\n\n if conn_type == 'post':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.post(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'put':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.put(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'delete':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.delete(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'get':\n response = requests.get(full_url, verify=verify_ssl,\n cookies=ticket)\n\n response.raise_for_status()\n\n try:\n returned_data = response.json()\n if 'data' not in returned_data:\n raise SaltCloudExecutionFailure\n return returned_data['data']\n except Exception:\n log.error('Error in trying to process JSON')\n log.error(response)\n" ]
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. 
''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. ''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def 
avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' 
) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): 
return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 
'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # 
net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list 
postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == 
search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # 
LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. 
Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') 
return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
avail_images
python
def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret
Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L420-L440
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def query(conn_type, option, post_data=None):\n '''\n Execute the HTTP request to the API\n '''\n if ticket is None or csrf is None or url is None:\n log.debug('Not authenticated yet, doing that now..')\n _authenticate()\n\n full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option)\n\n log.debug('%s: %s (%s)', conn_type, full_url, post_data)\n\n httpheaders = {'Accept': 'application/json',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent': 'salt-cloud-proxmox'}\n\n if conn_type == 'post':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.post(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'put':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.put(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'delete':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.delete(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'get':\n response = requests.get(full_url, verify=verify_ssl,\n cookies=ticket)\n\n response.raise_for_status()\n\n try:\n returned_data = response.json()\n if 'data' not in returned_data:\n raise SaltCloudExecutionFailure\n return returned_data['data']\n except Exception:\n log.error('Error in trying to process JSON')\n log.error(response)\n", "def avail_locations(call=None):\n '''\n Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt-cloud --list-locations my-proxmox-config\n '''\n if call == 'action':\n raise SaltCloudSystemExit(\n 'The avail_locations function must be called with '\n '-f or --function, or with the --list-locations option'\n )\n\n # could also use the get_resources_nodes but speed is ~the same\n nodes = query('get', 'nodes')\n\n ret = {}\n for node in nodes:\n name = node['node']\n ret[name] = node\n\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. 
''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. ''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def 
avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' 
) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): 
return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 
'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # 
net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list 
postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == 
search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # 
LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. 
Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') 
return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
list_nodes
python
def list_nodes(call=None):
    '''
    Return a list of the VMs that are managed by the provider

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q my-proxmox-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )

    ret = {}
    for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)):
        log.debug('VM_Name: %s', vm_name)
        log.debug('vm_details: %s', vm_details)

        # Expose only the fields Salt-cloud expects for a query listing
        entry = {
            'id': six.text_type(vm_details['vmid']),
            'image': six.text_type(vm_details['vmid']),
            'size': six.text_type(vm_details['disk']),
            'state': six.text_type(vm_details['status']),
        }

        # Sort each configured address into the private or public column
        private_ips = []
        public_ips = []
        vm_config = vm_details['config']
        if 'ip_address' in vm_config and vm_config['ip_address'] != '-':
            for ip_ in vm_config['ip_address'].split(' '):
                if IP(ip_).iptype() == 'PRIVATE':
                    private_ips.append(six.text_type(ip_))
                else:
                    public_ips.append(six.text_type(ip_))

        entry['private_ips'] = private_ips
        entry['public_ips'] = public_ips
        ret[vm_name] = entry

    return ret
Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L443-L485
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def get_resources_vms(call=None, resFilter=None, includeConfig=True):\n '''\n Retrieve all VMs available on this environment\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -f get_resources_vms my-proxmox-config\n '''\n\n timeoutTime = time.time() + 60\n while True:\n log.debug('Getting resource: vms.. (filter: %s)', resFilter)\n resources = query('get', 'cluster/resources')\n ret = {}\n badResource = False\n for resource in resources:\n if 'type' in resource and resource['type'] in ['openvz', 'qemu',\n 'lxc']:\n try:\n name = resource['name']\n except KeyError:\n badResource = True\n log.debug('No name in VM resource %s', repr(resource))\n break\n\n ret[name] = resource\n\n if includeConfig:\n # Requested to include the detailed configuration of a VM\n ret[name]['config'] = get_vmconfig(\n ret[name]['vmid'],\n ret[name]['node'],\n ret[name]['type']\n )\n\n if time.time() > timeoutTime:\n raise SaltCloudExecutionTimeout('FAILED to get the proxmox '\n 'resources vms')\n\n # Carry on if there wasn't a bad resource return from Proxmox\n if not badResource:\n break\n\n time.sleep(0.5)\n\n if resFilter is not None:\n log.debug('Filter given: %s, returning requested '\n 'resource: nodes', resFilter)\n return ret[resFilter]\n\n log.debug('Filter not given: %s, returning all resource: nodes', ret)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. 
''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. ''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def 
avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' ) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): 
return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 
'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # 
net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list 
postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == 
search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # 
LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. 
Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') 
return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
_stringlist_to_dictionary
python
def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret
Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'}
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L521-L538
null
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. 
''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. ''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def 
avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' 
) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' ) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. 
code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not 
determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != 
vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), 
postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( 
_stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == 
search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # 
LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. 
Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') 
return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
_dictionary_to_stringlist
python
def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value
Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L541-L553
null
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. 
''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. ''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def 
avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' 
) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' ) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False 
ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. 
This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command 
timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. 
An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. 
if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! 
log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + 
six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts 
if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, 
vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to 
become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
create
python
def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = 
six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = 
_stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = 
setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( 
vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret
Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L556-L867
[ "def start(name, vmid=None, call=None):\n '''\n Start a node.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -a start mymachine\n '''\n if call != 'action':\n raise SaltCloudSystemExit(\n 'The start action must be called with -a or --action.'\n )\n\n log.debug('Start: %s (%s) = Start', name, vmid)\n if not set_vm_status('start', name, vmid=vmid):\n log.error('Unable to bring VM %s (%s) up..', name, vmid)\n raise SaltCloudExecutionFailure\n\n # xxx: TBD: Check here whether the status was actually changed to 'started'\n\n return {'Started': '{0} was started.'.format(name)}\n", "def destroy(name, call=None):\n '''\n Destroy a node.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud --destroy mymachine\n '''\n if call == 'function':\n raise SaltCloudSystemExit(\n 'The destroy action must be called with -d, --destroy, '\n '-a or --action.'\n )\n\n __utils__['cloud.fire_event'](\n 'event',\n 'destroying instance',\n 'salt/cloud/{0}/destroying'.format(name),\n args={'name': name},\n sock_dir=__opts__['sock_dir'],\n transport=__opts__['transport']\n )\n\n vmobj = _get_vm_by_name(name)\n if vmobj is not None:\n # stop the vm\n if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped':\n stop(name, vmobj['vmid'], 'action')\n\n # wait until stopped\n if not wait_for_state(vmobj['vmid'], 'stopped'):\n return {'Error': 'Unable to stop {0}, command timed out'.format(name)}\n\n # required to wait a bit here, otherwise the VM is sometimes\n # still locked and destroy fails.\n time.sleep(3)\n\n query('delete', 'nodes/{0}/{1}'.format(\n vmobj['node'], vmobj['id']\n ))\n __utils__['cloud.fire_event'](\n 'event',\n 'destroyed instance',\n 'salt/cloud/{0}/destroyed'.format(name),\n args={'name': name},\n sock_dir=__opts__['sock_dir'],\n transport=__opts__['transport']\n )\n if __opts__.get('update_cachedir', False) is True:\n __utils__['cloud.delete_minion_cachedir'](\n name, __active_provider_name__.split(':')[0], __opts__)\n\n return {'Destroyed': '{0} was 
destroyed.'.format(name)}\n", "def query(conn_type, option, post_data=None):\n '''\n Execute the HTTP request to the API\n '''\n if ticket is None or csrf is None or url is None:\n log.debug('Not authenticated yet, doing that now..')\n _authenticate()\n\n full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option)\n\n log.debug('%s: %s (%s)', conn_type, full_url, post_data)\n\n httpheaders = {'Accept': 'application/json',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent': 'salt-cloud-proxmox'}\n\n if conn_type == 'post':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.post(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'put':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.put(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'delete':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.delete(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'get':\n response = requests.get(full_url, verify=verify_ssl,\n cookies=ticket)\n\n response.raise_for_status()\n\n try:\n returned_data = response.json()\n if 'data' not in returned_data:\n raise SaltCloudExecutionFailure\n return returned_data['data']\n except Exception:\n log.error('Error in trying to process JSON')\n log.error(response)\n", "def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. 
Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. 
This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n", "def is_profile_configured(opts, provider, profile_name, vm_=None):\n '''\n Check if the requested profile contains the minimum required parameters for\n a profile.\n\n Required parameters include image and provider for all drivers, while some\n drivers also require size keys.\n\n .. versionadded:: 2015.8.0\n '''\n # Standard dict keys required by all drivers.\n required_keys = ['provider']\n alias, driver = provider.split(':')\n\n # Most drivers need an image to be specified, but some do not.\n non_image_drivers = ['nova', 'virtualbox', 'libvirt', 'softlayer', 'oneandone', 'profitbricks']\n\n # Most drivers need a size, but some do not.\n non_size_drivers = ['opennebula', 'parallels', 'proxmox', 'scaleway',\n 'softlayer', 'softlayer_hw', 'vmware', 'vsphere',\n 'virtualbox', 'libvirt', 'oneandone', 'profitbricks']\n\n provider_key = opts['providers'][alias][driver]\n profile_key = opts['providers'][alias][driver]['profiles'][profile_name]\n\n # If cloning on Linode, size and image are not necessary.\n # They are obtained from the to-be-cloned VM.\n if driver == 'linode' and profile_key.get('clonefrom', False):\n non_image_drivers.append('linode')\n non_size_drivers.append('linode')\n elif driver == 'gce' and 'sourceImage' in 
six.text_type(vm_.get('ex_disks_gce_struct')):\n non_image_drivers.append('gce')\n\n # If cloning on VMware, specifying image is not necessary.\n if driver == 'vmware' and 'image' not in list(profile_key.keys()):\n non_image_drivers.append('vmware')\n\n if driver not in non_image_drivers:\n required_keys.append('image')\n if driver == 'vmware':\n required_keys.append('datastore')\n elif driver in ['linode', 'virtualbox']:\n required_keys.append('clonefrom')\n elif driver == 'nova':\n nova_image_keys = ['image', 'block_device_mapping', 'block_device', 'boot_volume']\n if not any([key in provider_key for key in nova_image_keys]) and not any([key in profile_key for key in nova_image_keys]):\n required_keys.extend(nova_image_keys)\n\n if driver not in non_size_drivers:\n required_keys.append('size')\n\n # Check if required fields are supplied in the provider config. If they\n # are present, remove it from the required_keys list.\n for item in list(required_keys):\n if item in provider_key:\n required_keys.remove(item)\n\n # If a vm_ dict was passed in, use that information to get any other configs\n # that we might have missed thus far, such as a option provided in a map file.\n if vm_:\n for item in list(required_keys):\n if item in vm_:\n required_keys.remove(item)\n\n # Check for remaining required parameters in the profile config.\n for item in required_keys:\n if profile_key.get(item, None) is None:\n # There's at least one required configuration item which is not set.\n log.error(\n \"The required '%s' configuration setting is missing from \"\n \"the '%s' profile, which is configured under the '%s' alias.\",\n item, profile_name, alias\n )\n return False\n\n return True\n", "def create_node(vm_, newid):\n '''\n Build and submit the requestdata to create a new node\n '''\n newnode = {}\n\n if 'technology' not in vm_:\n vm_['technology'] = 'openvz' # default virt tech if none is given\n\n if vm_['technology'] not in ['qemu', 'openvz', 'lxc']:\n # Wrong VM type 
given\n log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)')\n raise SaltCloudExecutionFailure\n\n if 'host' not in vm_:\n # Use globally configured/default location\n vm_['host'] = config.get_cloud_config_value(\n 'default_host', get_configured_provider(), __opts__, search_global=False\n )\n\n if vm_['host'] is None:\n # No location given for the profile\n log.error('No host given to create this VM on')\n raise SaltCloudExecutionFailure\n\n # Required by both OpenVZ and Qemu (KVM)\n vmhost = vm_['host']\n newnode['vmid'] = newid\n\n for prop in 'cpuunits', 'description', 'memory', 'onboot':\n if prop in vm_: # if the property is set, use it for the VM request\n newnode[prop] = vm_[prop]\n\n if vm_['technology'] == 'openvz':\n # OpenVZ related settings, using non-default names:\n newnode['hostname'] = vm_['name']\n newnode['ostemplate'] = vm_['image']\n\n # optional VZ settings\n for prop in ['cpus', 'disk', 'ip_address', 'nameserver',\n 'password', 'swap', 'poolid', 'storage']:\n if prop in vm_: # if the property is set, use it for the VM request\n newnode[prop] = vm_[prop]\n\n elif vm_['technology'] == 'lxc':\n # LXC related settings, using non-default names:\n newnode['hostname'] = vm_['name']\n newnode['ostemplate'] = vm_['image']\n\n static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory',\n 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs')\n for prop in _get_properties('/nodes/{node}/lxc',\n 'POST',\n static_props):\n if prop in vm_: # if the property is set, use it for the VM request\n newnode[prop] = vm_[prop]\n\n if 'pubkey' in vm_:\n newnode['ssh-public-keys'] = vm_['pubkey']\n\n # inform user the \"disk\" option is not supported for LXC hosts\n if 'disk' in vm_:\n log.warning('The \"disk\" option is not supported for LXC hosts and was ignored')\n\n # LXC specific network config\n # OpenVZ allowed specifying IP and gateway. 
To ease migration from\n # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config.\n # If you need more control, please use the net0 option directly.\n # This also assumes a /24 subnet.\n if 'ip_address' in vm_ and 'net0' not in vm_:\n newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth'\n\n # gateway is optional and does not assume a default\n if 'gw' in vm_:\n newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw']\n\n elif vm_['technology'] == 'qemu':\n # optional Qemu settings\n static_props = (\n 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0')\n for prop in _get_properties('/nodes/{node}/qemu',\n 'POST',\n static_props):\n if prop in vm_: # if the property is set, use it for the VM request\n newnode[prop] = vm_[prop]\n\n # The node is ready. Lets request it to be added\n __utils__['cloud.fire_event'](\n 'event',\n 'requesting instance',\n 'salt/cloud/{0}/requesting'.format(vm_['name']),\n args={\n 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)),\n },\n sock_dir=__opts__['sock_dir'],\n )\n\n log.debug('Preparing to generate a node using these parameters: %s ', newnode)\n if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu':\n postParams = {}\n postParams['newid'] = newnode['vmid']\n\n for prop in 'description', 'format', 'full', 'name':\n if 'clone_' + prop in vm_: # if the property is set, use it for the VM request\n postParams[prop] = vm_['clone_' + prop]\n\n if 'host' in vm_:\n postParams['target'] = vm_['host']\n\n try:\n int(vm_['clone_from'])\n except ValueError:\n if ':' in vm_['clone_from']:\n vmhost = vm_['clone_from'].split(':')[0]\n vm_['clone_from'] = vm_['clone_from'].split(':')[1]\n\n node = query('post', 'nodes/{0}/qemu/{1}/clone'.format(\n vmhost, vm_['clone_from']), postParams)\n else:\n node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode)\n return _parse_proxmox_upid(node, vm_)\n", "def 
_get_next_vmid():\n '''\n Proxmox allows the use of alternative ids instead of autoincrementing.\n Because of that its required to query what the first available ID is.\n '''\n return int(query('get', 'cluster/nextid'))\n", "def _parse_proxmox_upid(node, vm_=None):\n '''\n Upon requesting a task that runs for a longer period of time a UPID is given.\n This includes information about the job and can be used to lookup information in the log.\n '''\n ret = {}\n\n upid = node\n # Parse node response\n node = node.split(':')\n if node[0] == 'UPID':\n ret['node'] = six.text_type(node[1])\n ret['pid'] = six.text_type(node[2])\n ret['pstart'] = six.text_type(node[3])\n ret['starttime'] = six.text_type(node[4])\n ret['type'] = six.text_type(node[5])\n ret['vmid'] = six.text_type(node[6])\n ret['user'] = six.text_type(node[7])\n # include the upid again in case we'll need it again\n ret['upid'] = six.text_type(upid)\n\n if vm_ is not None and 'technology' in vm_:\n ret['technology'] = six.text_type(vm_['technology'])\n\n return ret\n", "def _stringlist_to_dictionary(input_string):\n '''\n Convert a stringlist (comma separated settings) to a dictionary\n\n The result of the string setting1=value1,setting2=value2 will be a python dictionary:\n\n {'setting1':'value1','setting2':'value2'}\n '''\n li = str(input_string).split(',')\n ret = {}\n for item in li:\n pair = str(item).replace(' ', '').split('=')\n if len(pair) != 2:\n log.warning('Cannot process stringlist item %s', item)\n continue\n\n ret[pair[0]] = pair[1]\n return ret\n", "def _dictionary_to_stringlist(input_dict):\n '''\n Convert a dictionary to a stringlist (comma separated settings)\n\n The result of the dictionary {'setting1':'value1','setting2':'value2'} will be:\n\n setting1=value1,setting2=value2\n '''\n string_value = \"\"\n for s in input_dict:\n string_value += \"{0}={1},\".format(s, input_dict[s])\n string_value = string_value[:-1]\n return string_value\n", "def wait_for_created(upid, timeout=300):\n 
'''\n Wait until a the vm has been created successfully\n '''\n start_time = time.time()\n info = _lookup_proxmox_task(upid)\n if not info:\n log.error('wait_for_created: No task information '\n 'retrieved based on given criteria.')\n raise SaltCloudExecutionFailure\n\n while True:\n if 'status' in info and info['status'] == 'OK':\n log.debug('Host has been created!')\n return True\n time.sleep(3) # Little more patience, we're not in a hurry\n if time.time() - start_time > timeout:\n log.debug('Timeout reached while waiting for host to be created')\n return False\n info = _lookup_proxmox_task(upid)\n", "def wait_for_task(upid, timeout=300):\n '''\n Wait until a the task has been finished successfully\n '''\n start_time = time.time()\n info = _lookup_proxmox_task(upid)\n if not info:\n log.error('wait_for_task: No task information '\n 'retrieved based on given criteria.')\n raise SaltCloudExecutionFailure\n\n while True:\n if 'status' in info and info['status'] == 'OK':\n log.debug('Task has been finished!')\n return True\n time.sleep(3) # Little more patience, we're not in a hurry\n if time.time() - start_time > timeout:\n log.debug('Timeout reached while waiting for task to be finished')\n return False\n info = _lookup_proxmox_task(upid)\n", "def wait_for_state(vmid, state, timeout=300):\n '''\n Wait until a specific state has been reached on a node\n '''\n start_time = time.time()\n node = get_vm_status(vmid=vmid)\n if not node:\n log.error('wait_for_state: No VM retrieved based on given criteria.')\n raise SaltCloudExecutionFailure\n\n while True:\n if node['status'] == state:\n log.debug('Host %s is now in \"%s\" state!', node['name'], state)\n return True\n time.sleep(1)\n if time.time() - start_time > timeout:\n log.debug('Timeout reached while waiting for %s to become %s',\n node['name'], state)\n return False\n node = get_vm_status(vmid=vmid)\n log.debug('State for %s is: \"%s\" instead of \"%s\"',\n node['name'], node['status'], state)\n" ]
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. 
''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. ''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def 
avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' 
) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' ) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children 
sub = (item for item in sub if item["path"] == search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = 
vm_[prop] elif vm_['technology'] == 'lxc': # LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. 
Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') 
return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
_import_api
python
def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json)
Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api"
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L870-L882
null
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. 
''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. ''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def 
avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' 
) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' ) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): 
return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 
'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # 
net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list 
postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: 
for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform 
user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. 
Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') 
return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
_get_properties
python
def _get_properties(path="", method="GET", forced_params=None):
    '''
    Return the set of parameter names the Proxmox API accepts for the given
    endpoint path and HTTP method.

    path
        API endpoint path, e.g. ``/nodes/{node}/qemu``. Empty segments are
        ignored.

    method
        HTTP method key to look up in the API schema (``GET``, ``POST``, ...).

    forced_params
        Optional iterable of parameter names to always include in the result.

    Returns a ``set`` of parameter name strings. Properties declared in the
    schema as ``name[n]`` are expanded to ``name0`` .. ``name9``.
    '''
    if api is None:
        # Lazily fetch/cache the API schema on first use.
        _import_api()
    sub = api
    path_levels = [level for level in path.split('/') if level != '']
    search_path = ''
    props = []
    parameters = set([] if forced_params is None else forced_params)
    # Walk every path element but the last, descending into the schema tree.
    for elem in path_levels[:-1]:
        search_path += '/' + elem
        # Find the schema entry whose path matches and descend into its children.
        # Use the builtin next() (the Python 2-only generator .next() method
        # does not exist on Python 3 and raised AttributeError here).
        sub = next(item for item in sub if item["path"] == search_path)['children']
    # Resolve the leaf element of the path.
    search_path += '/' + path_levels[-1]
    sub = next((item for item in sub if item["path"] == search_path))
    try:
        # get list of properties for requested method
        props = sub['info'][method]['parameters']['properties'].keys()
    except KeyError as exc:
        log.error('method not found: "%s"', exc)
    for prop in props:
        numerical = re.match(r'(\w+)\[n\]', prop)
        # generate (arbitrarily) 10 properties for duplicatable properties identified by:
        # "prop[n]"
        if numerical:
            for i in range(10):
                parameters.add(numerical.group(1) + six.text_type(i))
        else:
            parameters.add(prop)
    return parameters
Return the parameter list from the API for the given path and HTTP method
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L885-L919
null
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. 
''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. ''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def 
avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' 
) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' ) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): 
return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 
'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # 
net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list 
postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. 
Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. 
# This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_) def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' 
) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)): if item['vmid'] == vmid: node = host_name # If we reached this point, we have all the information we need data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid)) return data def wait_for_created(upid, timeout=300): ''' Wait until a the vm has been created successfully ''' start_time = time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_created: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Host has been created!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for host to be created') return False info = _lookup_proxmox_task(upid) def wait_for_state(vmid, state, timeout=300): ''' Wait until a specific state has been reached on a node ''' start_time = time.time() node = get_vm_status(vmid=vmid) if not node: log.error('wait_for_state: No VM retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if node['status'] == state: log.debug('Host %s is now in "%s" state!', node['name'], state) return True time.sleep(1) if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for %s to become %s', node['name'], state) return False node = get_vm_status(vmid=vmid) log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) def wait_for_task(upid, timeout=300): ''' Wait until a the task has been finished successfully ''' start_time = 
time.time() info = _lookup_proxmox_task(upid) if not info: log.error('wait_for_task: No task information ' 'retrieved based on given criteria.') raise SaltCloudExecutionFailure while True: if 'status' in info and info['status'] == 'OK': log.debug('Task has been finished!') return True time.sleep(3) # Little more patience, we're not in a hurry if time.time() - start_time > timeout: log.debug('Timeout reached while waiting for task to be finished') return False info = _lookup_proxmox_task(upid) def destroy(name, call=None): ''' Destroy a node. CLI Example: .. code-block:: bash salt-cloud --destroy mymachine ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vmobj = _get_vm_by_name(name) if vmobj is not None: # stop the vm if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped': stop(name, vmobj['vmid'], 'action') # wait until stopped if not wait_for_state(vmobj['vmid'], 'stopped'): return {'Error': 'Unable to stop {0}, command timed out'.format(name)} # required to wait a bit here, otherwise the VM is sometimes # still locked and destroy fails. 
time.sleep(3) query('delete', 'nodes/{0}/{1}'.format( vmobj['node'], vmobj['id'] )) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir']( name, __active_provider_name__.split(':')[0], __opts__) return {'Destroyed': '{0} was destroyed.'.format(name)} def set_vm_status(status, name=None, vmid=None): ''' Convenience function for setting VM status ''' log.debug('Set status to %s for %s (%s)', status, name, vmid) if vmid is not None: log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_id(vmid) else: log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status) vmobj = _get_vm_by_name(name) if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj: log.error('Unable to set status %s for %s (%s)', status, name, vmid) raise SaltCloudExecutionTimeout log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj) data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format( vmobj['node'], vmobj['type'], vmobj['vmid'], status)) result = _parse_proxmox_upid(data, vmobj) if result is not False and result is not None: log.debug('Set_vm_status action result: %s', result) return True return False def get_vm_status(vmid=None, name=None): ''' Get the status for a VM, either via the ID or the hostname ''' if vmid is not None: log.debug('get_vm_status: VMID %s', vmid) vmobj = _get_vm_by_id(vmid) elif name is not None: log.debug('get_vm_status: name %s', name) vmobj = _get_vm_by_name(name) else: log.debug("get_vm_status: No ID or NAME given") raise SaltCloudExecutionFailure log.debug('VM found: %s', vmobj) if vmobj is not None and 'node' in vmobj: log.debug("VM_STATUS: Has desired info. Retrieving.. 
(%s)", vmobj['name']) data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format( vmobj['node'], vmobj['type'], vmobj['vmid'])) return data log.error('VM or requested status not found..') return False def start(name, vmid=None, call=None): ''' Start a node. CLI Example: .. code-block:: bash salt-cloud -a start mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The start action must be called with -a or --action.' ) log.debug('Start: %s (%s) = Start', name, vmid) if not set_vm_status('start', name, vmid=vmid): log.error('Unable to bring VM %s (%s) up..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'started' return {'Started': '{0} was started.'.format(name)} def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)} def shutdown(name=None, vmid=None, call=None): ''' Shutdown a node via ACPI. CLI Example: .. code-block:: bash salt-cloud -a shutdown mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The shutdown action must be called with -a or --action.' ) if not set_vm_status('shutdown', name, vmid=vmid): log.error('Unable to shut VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Shutdown': '{0} was shutdown.'.format(name)}
saltstack/salt
salt/cloud/clouds/proxmox.py
create_node
python
def create_node(vm_, newid): ''' Build and submit the requestdata to create a new node ''' newnode = {} if 'technology' not in vm_: vm_['technology'] = 'openvz' # default virt tech if none is given if vm_['technology'] not in ['qemu', 'openvz', 'lxc']: # Wrong VM type given log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)') raise SaltCloudExecutionFailure if 'host' not in vm_: # Use globally configured/default location vm_['host'] = config.get_cloud_config_value( 'default_host', get_configured_provider(), __opts__, search_global=False ) if vm_['host'] is None: # No location given for the profile log.error('No host given to create this VM on') raise SaltCloudExecutionFailure # Required by both OpenVZ and Qemu (KVM) vmhost = vm_['host'] newnode['vmid'] = newid for prop in 'cpuunits', 'description', 'memory', 'onboot': if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if vm_['technology'] == 'openvz': # OpenVZ related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] # optional VZ settings for prop in ['cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage']: if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] elif vm_['technology'] == 'lxc': # LXC related settings, using non-default names: newnode['hostname'] = vm_['name'] newnode['ostemplate'] = vm_['image'] static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs') for prop in _get_properties('/nodes/{node}/lxc', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] if 'pubkey' in vm_: newnode['ssh-public-keys'] = vm_['pubkey'] # inform user the "disk" option is not supported for LXC hosts if 'disk' in vm_: log.warning('The "disk" option is not 
supported for LXC hosts and was ignored') # LXC specific network config # OpenVZ allowed specifying IP and gateway. To ease migration from # Proxmox 3, I've mapped the ip_address and gw to a generic net0 config. # If you need more control, please use the net0 option directly. # This also assumes a /24 subnet. if 'ip_address' in vm_ and 'net0' not in vm_: newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth' # gateway is optional and does not assume a default if 'gw' in vm_: newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw'] elif vm_['technology'] == 'qemu': # optional Qemu settings static_props = ( 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0') for prop in _get_properties('/nodes/{node}/qemu', 'POST', static_props): if prop in vm_: # if the property is set, use it for the VM request newnode[prop] = vm_[prop] # The node is ready. Lets request it to be added __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)), }, sock_dir=__opts__['sock_dir'], ) log.debug('Preparing to generate a node using these parameters: %s ', newnode) if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': postParams = {} postParams['newid'] = newnode['vmid'] for prop in 'description', 'format', 'full', 'name': if 'clone_' + prop in vm_: # if the property is set, use it for the VM request postParams[prop] = vm_['clone_' + prop] if 'host' in vm_: postParams['target'] = vm_['host'] try: int(vm_['clone_from']) except ValueError: if ':' in vm_['clone_from']: vmhost = vm_['clone_from'].split(':')[0] vm_['clone_from'] = vm_['clone_from'].split(':')[1] node = query('post', 'nodes/{0}/qemu/{1}/clone'.format( vmhost, vm_['clone_from']), postParams) else: node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode) return _parse_proxmox_upid(node, vm_)
Build and submit the requestdata to create a new node
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L922-L1042
[ "def query(conn_type, option, post_data=None):\n '''\n Execute the HTTP request to the API\n '''\n if ticket is None or csrf is None or url is None:\n log.debug('Not authenticated yet, doing that now..')\n _authenticate()\n\n full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option)\n\n log.debug('%s: %s (%s)', conn_type, full_url, post_data)\n\n httpheaders = {'Accept': 'application/json',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent': 'salt-cloud-proxmox'}\n\n if conn_type == 'post':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.post(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'put':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.put(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'delete':\n httpheaders['CSRFPreventionToken'] = csrf\n response = requests.delete(full_url, verify=verify_ssl,\n data=post_data,\n cookies=ticket,\n headers=httpheaders)\n elif conn_type == 'get':\n response = requests.get(full_url, verify=verify_ssl,\n cookies=ticket)\n\n response.raise_for_status()\n\n try:\n returned_data = response.json()\n if 'data' not in returned_data:\n raise SaltCloudExecutionFailure\n return returned_data['data']\n except Exception:\n log.error('Error in trying to process JSON')\n log.error(response)\n", "def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. 
Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. 
This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n", "def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n ('user',)\n )\n", "def _parse_proxmox_upid(node, vm_=None):\n '''\n Upon requesting a task that runs for a longer period of time a UPID is given.\n This includes information about the job and can be used to lookup information in the log.\n '''\n ret = {}\n\n upid = node\n # Parse node response\n node = node.split(':')\n if node[0] == 'UPID':\n ret['node'] = six.text_type(node[1])\n ret['pid'] = six.text_type(node[2])\n ret['pstart'] = six.text_type(node[3])\n ret['starttime'] = six.text_type(node[4])\n ret['type'] = six.text_type(node[5])\n ret['vmid'] = six.text_type(node[6])\n ret['user'] = six.text_type(node[7])\n # include the upid again in case we'll need it again\n ret['upid'] = six.text_type(upid)\n\n if vm_ is not None and 'technology' in vm_:\n ret['technology'] = six.text_type(vm_['technology'])\n\n return ret\n", "def _get_properties(path=\"\", method=\"GET\", forced_params=None):\n '''\n Return the parameter list from api for defined path and HTTP method\n '''\n if api is None:\n _import_api()\n\n sub = api\n path_levels = [level for level in path.split('/') if level != 
'']\n search_path = ''\n props = []\n parameters = set([] if forced_params is None else forced_params)\n # Browse all path elements but last\n for elem in path_levels[:-1]:\n search_path += '/' + elem\n # Lookup for a dictionary with path = \"requested path\" in list\" and return its children\n sub = (item for item in sub if item[\"path\"] == search_path).next()['children']\n # Get leaf element in path\n search_path += '/' + path_levels[-1]\n sub = next((item for item in sub if item[\"path\"] == search_path))\n try:\n # get list of properties for requested method\n props = sub['info'][method]['parameters']['properties'].keys()\n except KeyError as exc:\n log.error('method not found: \"%s\"', exc)\n for prop in props:\n numerical = re.match(r'(\\w+)\\[n\\]', prop)\n # generate (arbitrarily) 10 properties for duplicatable properties identified by:\n # \"prop[n]\"\n if numerical:\n for i in range(10):\n parameters.add(numerical.group(1) + six.text_type(i))\n else:\n parameters.add(prop)\n return parameters\n" ]
# -*- coding: utf-8 -*- ''' Proxmox Cloud Module ====================== .. versionadded:: 2014.7.0 The Proxmox cloud module is used to control access to cloud providers using the Proxmox system (KVM / OpenVZ / LXC). Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/proxmox.conf``: .. code-block:: yaml my-proxmox-config: # Proxmox account information user: myuser@pam or myuser@pve password: mypassword url: hypervisor.domain.tld port: 8006 driver: proxmox verify_ssl: True :maintainer: Frank Klaassen <frank@cloudright.nl> :depends: requests >= 2.2.1 :depends: IPy >= 0.81 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import time import pprint import logging import re # Import salt libs import salt.utils.cloud import salt.utils.json # Import salt cloud libs import salt.config as config from salt.exceptions import ( SaltCloudSystemExit, SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) # Import 3rd-party Libs from salt.ext import six from salt.ext.six.moves import range try: import requests HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False try: from IPy import IP HAS_IPY = True except ImportError: HAS_IPY = False # Get logging started log = logging.getLogger(__name__) __virtualname__ = 'proxmox' def __virtual__(): ''' Check for PROXMOX configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('user',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' deps = { 'requests': HAS_REQUESTS, 'IPy': HAS_IPY } return config.check_driver_dependencies( __virtualname__, deps ) url = None port = None ticket = None csrf = None verify_ssl = None api = None def _authenticate(): ''' Retrieve CSRF and API tickets for the Proxmox API ''' global url, port, ticket, csrf, verify_ssl url = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) port = config.get_cloud_config_value( 'port', get_configured_provider(), __opts__, default=8006, search_global=False ) username = config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd = config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) verify_ssl = config.get_cloud_config_value( 'verify_ssl', get_configured_provider(), __opts__, default=True, search_global=False ) connect_data = {'username': username, 'password': passwd} full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port) returned_data = requests.post( full_url, verify=verify_ssl, data=connect_data).json() ticket = {'PVEAuthCookie': returned_data['data']['ticket']} csrf = six.text_type(returned_data['data']['CSRFPreventionToken']) def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, 
data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response) def _get_vm_by_name(name, allDetails=False): ''' Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information. ''' vms = get_resources_vms(includeConfig=allDetails) if name in vms: return vms[name] log.info('VM with name "%s" could not be found.', name) return False def _get_vm_by_id(vmid, allDetails=False): ''' Retrieve a VM based on the ID. ''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)): if six.text_type(vm_details['vmid']) == six.text_type(vmid): return vm_details log.info('VM with ID "%s" could not be found.', vmid) return False def _get_next_vmid(): ''' Proxmox allows the use of alternative ids instead of autoincrementing. Because of that its required to query what the first available ID is. ''' return int(query('get', 'cluster/nextid')) def _check_ip_available(ip_addr): ''' Proxmox VMs refuse to start when the IP is already being used. This function can be used to prevent VMs being created with duplicate IP's or to generate a warning. 
''' for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): vm_config = vm_details['config'] if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr: log.debug('IP "%s" is already defined', ip_addr) return False log.debug('IP \'%s\' is available to be defined', ip_addr) return True def _parse_proxmox_upid(node, vm_=None): ''' Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log. ''' ret = {} upid = node # Parse node response node = node.split(':') if node[0] == 'UPID': ret['node'] = six.text_type(node[1]) ret['pid'] = six.text_type(node[2]) ret['pstart'] = six.text_type(node[3]) ret['starttime'] = six.text_type(node[4]) ret['type'] = six.text_type(node[5]) ret['vmid'] = six.text_type(node[6]) ret['user'] = six.text_type(node[7]) # include the upid again in case we'll need it again ret['upid'] = six.text_type(upid) if vm_ is not None and 'technology' in vm_: ret['technology'] = six.text_type(vm_['technology']) return ret def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False def get_resources_nodes(call=None, resFilter=None): ''' Retrieve all hypervisors (nodes) available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_nodes my-proxmox-config ''' log.debug('Getting resource: nodes.. 
(filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} for resource in resources: if 'type' in resource and resource['type'] == 'node': name = resource['node'] ret[name] = resource if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def get_resources_vms(call=None, resFilter=None, includeConfig=True): ''' Retrieve all VMs available on this environment CLI Example: .. code-block:: bash salt-cloud -f get_resources_vms my-proxmox-config ''' timeoutTime = time.time() + 60 while True: log.debug('Getting resource: vms.. (filter: %s)', resFilter) resources = query('get', 'cluster/resources') ret = {} badResource = False for resource in resources: if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']: try: name = resource['name'] except KeyError: badResource = True log.debug('No name in VM resource %s', repr(resource)) break ret[name] = resource if includeConfig: # Requested to include the detailed configuration of a VM ret[name]['config'] = get_vmconfig( ret[name]['vmid'], ret[name]['node'], ret[name]['type'] ) if time.time() > timeoutTime: raise SaltCloudExecutionTimeout('FAILED to get the proxmox ' 'resources vms') # Carry on if there wasn't a bad resource return from Proxmox if not badResource: break time.sleep(0.5) if resFilter is not None: log.debug('Filter given: %s, returning requested ' 'resource: nodes', resFilter) return ret[resFilter] log.debug('Filter not given: %s, returning all resource: nodes', ret) return ret def script(vm_): ''' Return the script deployment object ''' script_name = config.get_cloud_config_value('script', vm_, __opts__) if not script_name: script_name = 'bootstrap-salt' return salt.utils.cloud.os_script( script_name, vm_, __opts__, salt.utils.cloud.salt_config_to_yaml( salt.utils.cloud.minion_config(__opts__, vm_) ) ) def 
avail_locations(call=None): ''' Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node return ret def avail_images(call=None, location='local'): ''' Return a list of the images that are on the provider CLI Example: .. code-block:: bash salt-cloud --list-images my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) ret = {} for host_name, host_details in six.iteritems(avail_locations()): for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)): ret[item['volid']] = item return ret def list_nodes(call=None): ''' Return a list of the VMs that are managed by the provider CLI Example: .. code-block:: bash salt-cloud -Q my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' 
) ret = {} for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)): log.debug('VM_Name: %s', vm_name) log.debug('vm_details: %s', vm_details) # Limit resultset on what Salt-cloud demands: ret[vm_name] = {} ret[vm_name]['id'] = six.text_type(vm_details['vmid']) ret[vm_name]['image'] = six.text_type(vm_details['vmid']) ret[vm_name]['size'] = six.text_type(vm_details['disk']) ret[vm_name]['state'] = six.text_type(vm_details['status']) # Figure out which is which to put it in the right column private_ips = [] public_ips = [] if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-': ips = vm_details['config']['ip_address'].split(' ') for ip_ in ips: if IP(ip_).iptype() == 'PRIVATE': private_ips.append(six.text_type(ip_)) else: public_ips.append(six.text_type(ip_)) ret[vm_name]['private_ips'] = private_ips ret[vm_name]['public_ips'] = public_ips return ret def list_nodes_full(call=None): ''' Return a list of the VMs that are on the provider CLI Example: .. code-block:: bash salt-cloud -F my-proxmox-config ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes_full function must be called with -f or --function.' ) return get_resources_vms(includeConfig=True) def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields CLI Example: .. 
code-block:: bash salt-cloud -S my-proxmox-config ''' return salt.utils.cloud.list_nodes_select( list_nodes_full(), __opts__['query.selection'], call, ) def _stringlist_to_dictionary(input_string): ''' Convert a stringlist (comma separated settings) to a dictionary The result of the string setting1=value1,setting2=value2 will be a python dictionary: {'setting1':'value1','setting2':'value2'} ''' li = str(input_string).split(',') ret = {} for item in li: pair = str(item).replace(' ', '').split('=') if len(pair) != 2: log.warning('Cannot process stringlist item %s', item) continue ret[pair[0]] = pair[1] return ret def _dictionary_to_stringlist(input_dict): ''' Convert a dictionary to a stringlist (comma separated settings) The result of the dictionary {'setting1':'value1','setting2':'value2'} will be: setting1=value1,setting2=value2 ''' string_value = "" for s in input_dict: string_value += "{0}={1},".format(s, input_dict[s]) string_value = string_value[:-1] return string_value def create(vm_): ''' Create a single VM from a data dict CLI Example: .. code-block:: bash salt-cloud -p proxmox-ubuntu vmhostname ''' try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'proxmox', vm_['profile'], vm_=vm_) is False: return False except AttributeError: pass ret = {} __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.info('Creating Cloud VM %s', vm_['name']) if 'use_dns' in vm_ and 'ip_address' not in vm_: use_dns = vm_['use_dns'] if use_dns: from socket import gethostbyname, gaierror try: ip_address = gethostbyname(six.text_type(vm_['name'])) except gaierror: log.debug('Resolving of %s failed', vm_['name']) else: vm_['ip_address'] = six.text_type(ip_address) try: newid = _get_next_vmid() data = create_node(vm_, newid) except Exception as exc: log.error( 'Error creating %s on PROXMOX\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False ret['creation_data'] = data name = vm_['name'] # hostname which we know if 'clone' in vm_ and vm_['clone'] is True: vmid = newid else: vmid = data['vmid'] # vmid which we have received host = data['node'] # host which we have received nodeType = data['technology'] # VM tech (Qemu / OpenVZ) if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0: # Determine which IP to use in order of preference: if 'ip_address' in vm_: ip_address = six.text_type(vm_['ip_address']) elif 'public_ips' in data: ip_address = six.text_type(data['public_ips'][0]) # first IP elif 'private_ips' in data: ip_address = six.text_type(data['private_ips'][0]) # first IP else: raise SaltCloudExecutionFailure("Could not determine an IP address to use") # wait until the vm has been created so we can start it if not wait_for_created(data['upid'], timeout=300): 
return {'Error': 'Unable to create {0}, command timed out'.format(name)} if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu': # If we cloned a machine, see if we need to reconfigure any of the options such as net0, # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's # brought up log.info('Configuring cloned VM') # Modify the settings for the VM one at a time so we can see any problems with the values # as quickly as possible for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # cloud-init settings for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain': if setting in vm_: # if the property is set, use it for the VM request postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(3): setting = 'ide{0}'.format(setting_number) if setting in vm_: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(5): setting = 'sata{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 
'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(13): setting = 'scsi{0}'.format(setting_number) if setting in vm_: vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) if setting in vm_config: setting_params = vm_[setting] setting_storage = setting_params.split(':')[0] setting_size = _stringlist_to_dictionary(setting_params)['size'] vm_disk_params = vm_config[setting] vm_disk_storage = vm_disk_params.split(':')[0] vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size'] # if storage is different, move the disk if setting_storage != vm_disk_storage: postParams = {} postParams['disk'] = setting postParams['storage'] = setting_storage postParams['delete'] = 1 node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format( vm_['host'], vmid), postParams) data = _parse_proxmox_upid(node, vm_) # wait until the disk has been moved if not wait_for_task(data['upid'], timeout=300): return {'Error': 'Unable to move disk {0}, command timed out'.format( setting)} # if storage is different, move the disk if setting_size != vm_disk_size: postParams = {} postParams['disk'] = setting postParams['size'] = setting_size query('put', 'nodes/{0}/qemu/{1}/resize'.format( vm_['host'], vmid), postParams) else: postParams = {} postParams[setting] = vm_[setting] query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # 
net strings are a list of comma seperated settings. We need to merge the settings so that # the setting in the profile only changes the settings it touches and the other settings # are left alone. An example of why this is necessary is because the MAC address is set # in here and generally you don't want to alter or have to know the MAC address of the new # instance, but you may want to set the VLAN bridge for example for setting_number in range(20): setting = 'net{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) for setting_number in range(20): setting = 'ipconfig{0}'.format(setting_number) if setting in vm_: data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid)) # Generate a dictionary of settings from the existing string new_setting = {} if setting in data: new_setting.update(_stringlist_to_dictionary(data[setting])) # Merge the new settings (as a dictionary) into the existing dictionary to get the # new merged settings if setting_number == 0 and 'ip_address' in vm_: if 'gw' in _stringlist_to_dictionary(vm_[setting]): new_setting.update(_stringlist_to_dictionary( 'ip={0}/24,gw={1}'.format( vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw']))) else: new_setting.update( _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address']))) else: new_setting.update(_stringlist_to_dictionary(vm_[setting])) # Convert the dictionary back into a string list 
postParams = {setting: _dictionary_to_stringlist(new_setting)} query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams) # VM has been created. Starting.. if not start(name, vmid, call='action'): log.error('Node %s (%s) failed to start!', name, vmid) raise SaltCloudExecutionFailure # Wait until the VM has fully started log.debug('Waiting for state "running" for vm %s on %s', vmid, host) if not wait_for_state(vmid, 'running'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} # For QEMU VMs, we can get the IP Address from qemu-agent if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1: def __find_agent_ip(vm_): log.debug("Waiting for qemu-agent to start...") endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid) interfaces = query('get', endpoint) # If we get a result from the agent, parse it if 'result' in interfaces: for interface in interfaces['result']: if_name = interface['name'] # Only check ethernet type interfaces, as they are not returned in any order if if_name.startswith('eth') or if_name.startswith('ens'): for if_addr in interface['ip-addresses']: ip_addr = if_addr['ip-address'] # Ensure interface has a valid IPv4 address if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None: return six.text_type(ip_addr) raise SaltCloudExecutionFailure # We have to wait for a bit for qemu-agent to start try: ip_address = __utils__['cloud.wait_for_fun']( __find_agent_ip, vm_=vm_ ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # If VM was created but we can't connect, destroy it. 
destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(six.text_type(exc)) log.debug('Using IP address %s', ip_address) ssh_username = config.get_cloud_config_value( 'ssh_username', vm_, __opts__, default='root' ) ssh_password = config.get_cloud_config_value( 'password', vm_, __opts__, ) ret['ip_address'] = ip_address ret['username'] = ssh_username ret['password'] = ssh_password vm_['ssh_host'] = ip_address vm_['password'] = ssh_password ret = __utils__['cloud.bootstrap'](vm_, __opts__) # Report success! log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']( 'created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], ) return ret def _import_api(): ''' Download https://<url>/pve-docs/api-viewer/apidoc.js Extract content of pveapi var (json formated) Load this json content into global variable "api" ''' global api full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port) returned_data = requests.get(full_url, verify=verify_ssl) re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE) api_json = re_filter.findall(returned_data.text)[0] api = salt.utils.json.loads(api_json) def _get_properties(path="", method="GET", forced_params=None): ''' Return the parameter list from api for defined path and HTTP method ''' if api is None: _import_api() sub = api path_levels = [level for level in path.split('/') if level != ''] search_path = '' props = [] parameters = set([] if forced_params is None else forced_params) # Browse all path elements but last for elem in path_levels[:-1]: search_path += '/' + elem # Lookup for a dictionary with path = "requested path" in list" and return its children sub = (item for item in sub if item["path"] == 
search_path).next()['children'] # Get leaf element in path search_path += '/' + path_levels[-1] sub = next((item for item in sub if item["path"] == search_path)) try: # get list of properties for requested method props = sub['info'][method]['parameters']['properties'].keys() except KeyError as exc: log.error('method not found: "%s"', exc) for prop in props: numerical = re.match(r'(\w+)\[n\]', prop) # generate (arbitrarily) 10 properties for duplicatable properties identified by: # "prop[n]" if numerical: for i in range(10): parameters.add(numerical.group(1) + six.text_type(i)) else: parameters.add(prop) return parameters def show_instance(name, call=None): ''' Show the details from Proxmox concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name] def get_vmconfig(vmid, node=None, node_type='openvz'): ''' Get VM configuration ''' if node is None: # We need to figure out which node this VM is on. 
        for host_name, host_details in six.iteritems(avail_locations()):
            for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)):
                if item['vmid'] == vmid:
                    node = host_name
        # NOTE(review): the scan continues over all hosts even after a match,
        # and node stays None if the vmid is never found — TODO confirm intent

    # If we reached this point, we have all the information we need
    data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid))

    return data


def wait_for_created(upid, timeout=300):
    '''
    Wait until the VM identified by the given task UPID has been created
    successfully.

    :param upid: Proxmox task UPID returned by the create request
    :param timeout: maximum number of seconds to wait
    :return: ``True`` once the task reports OK, ``False`` on timeout
    :raises SaltCloudExecutionFailure: if no task info can be retrieved
    '''
    start_time = time.time()
    info = _lookup_proxmox_task(upid)
    if not info:
        log.error('wait_for_created: No task information '
                  'retrieved based on given criteria.')
        raise SaltCloudExecutionFailure
    while True:
        if 'status' in info and info['status'] == 'OK':
            log.debug('Host has been created!')
            return True
        time.sleep(3)  # Little more patience, we're not in a hurry
        if time.time() - start_time > timeout:
            log.debug('Timeout reached while waiting for host to be created')
            return False
        info = _lookup_proxmox_task(upid)


def wait_for_state(vmid, state, timeout=300):
    '''
    Wait until a specific state has been reached on a node.

    :param vmid: numeric ID of the VM to poll
    :param state: desired status string, e.g. ``running`` or ``stopped``
    :param timeout: maximum number of seconds to wait
    :return: ``True`` once the state is reached, ``False`` on timeout
    :raises SaltCloudExecutionFailure: if the VM cannot be retrieved at all
    '''
    start_time = time.time()
    node = get_vm_status(vmid=vmid)
    if not node:
        log.error('wait_for_state: No VM retrieved based on given criteria.')
        raise SaltCloudExecutionFailure

    while True:
        if node['status'] == state:
            log.debug('Host %s is now in "%s" state!', node['name'], state)
            return True
        time.sleep(1)
        if time.time() - start_time > timeout:
            log.debug('Timeout reached while waiting for %s to become %s',
                      node['name'], state)
            return False
        # Re-poll the status and log the mismatch before the next iteration
        node = get_vm_status(vmid=vmid)
        log.debug('State for %s is: "%s" instead of "%s"',
                  node['name'], node['status'], state)


def wait_for_task(upid, timeout=300):
    '''
    Wait until the task identified by the given UPID has finished
    successfully.

    :param upid: Proxmox task UPID to poll
    :param timeout: maximum number of seconds to wait
    :return: ``True`` once the task reports OK, ``False`` on timeout
    :raises SaltCloudExecutionFailure: if no task info can be retrieved
    '''
    start_time = time.time()
    info = _lookup_proxmox_task(upid)
    if not info:
        log.error('wait_for_task: No task information '
                  'retrieved based on given criteria.')
        raise SaltCloudExecutionFailure
    while True:
        if 'status' in info and info['status'] == 'OK':
            log.debug('Task has been finished!')
            return True
        time.sleep(3)  # Little more patience, we're not in a hurry
        if time.time() - start_time > timeout:
            log.debug('Timeout reached while waiting for task to be finished')
            return False
        info = _lookup_proxmox_task(upid)


def destroy(name, call=None):
    '''
    Destroy a node.

    CLI Example:

    .. code-block:: bash

        salt-cloud --destroy mymachine
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    vmobj = _get_vm_by_name(name)
    if vmobj is not None:
        # stop the vm
        if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped':
            stop(name, vmobj['vmid'], 'action')

        # wait until stopped
        if not wait_for_state(vmobj['vmid'], 'stopped'):
            return {'Error': 'Unable to stop {0}, command timed out'.format(name)}

        # required to wait a bit here, otherwise the VM is sometimes
        # still locked and destroy fails.
        time.sleep(3)

        query('delete', 'nodes/{0}/{1}'.format(
            vmobj['node'], vmobj['id']
        ))
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # Drop the minion from the salt-cloud cache dir when caching is enabled
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name, __active_provider_name__.split(':')[0], __opts__)

        return {'Destroyed': '{0} was destroyed.'.format(name)}


def set_vm_status(status, name=None, vmid=None):
    '''
    Convenience function for setting VM status.

    :param status: status action to post, e.g. ``start``, ``stop``, ``shutdown``
    :param name: VM name; used for lookup when ``vmid`` is not given
    :param vmid: numeric VM ID; takes precedence over ``name``
    :return: ``True`` when the posted task yields a parseable UPID, else ``False``
    :raises SaltCloudExecutionTimeout: when the VM cannot be resolved
    '''
    log.debug('Set status to %s for %s (%s)', status, name, vmid)

    if vmid is not None:
        log.debug('set_vm_status: via ID - VMID %s (%s): %s', vmid, name, status)
        vmobj = _get_vm_by_id(vmid)
    else:
        log.debug('set_vm_status: via name - VMID %s (%s): %s', vmid, name, status)
        vmobj = _get_vm_by_name(name)

    if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj:
        log.error('Unable to set status %s for %s (%s)', status, name, vmid)
        raise SaltCloudExecutionTimeout

    log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj)
    data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format(
        vmobj['node'], vmobj['type'], vmobj['vmid'], status))

    # The POST returns a task UPID; parse it to confirm the task was accepted
    result = _parse_proxmox_upid(data, vmobj)

    if result is not False and result is not None:
        log.debug('Set_vm_status action result: %s', result)
        return True

    return False


def get_vm_status(vmid=None, name=None):
    '''
    Get the status for a VM, either via the ID or the hostname.

    :param vmid: numeric VM ID; takes precedence over ``name``
    :param name: VM name; used when ``vmid`` is not given
    :return: current-status dict from the API, or ``False`` if not found
    :raises SaltCloudExecutionFailure: if neither ``vmid`` nor ``name`` is given
    '''
    if vmid is not None:
        log.debug('get_vm_status: VMID %s', vmid)
        vmobj = _get_vm_by_id(vmid)
    elif name is not None:
        log.debug('get_vm_status: name %s', name)
        vmobj = _get_vm_by_name(name)
    else:
        log.debug("get_vm_status: No ID or NAME given")
        raise SaltCloudExecutionFailure

    log.debug('VM found: %s', vmobj)

    if vmobj is not None and 'node' in vmobj:
        log.debug("VM_STATUS: Has desired info. Retrieving.. (%s)",
                  vmobj['name'])
        data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format(
            vmobj['node'], vmobj['type'], vmobj['vmid']))
        return data

    log.error('VM or requested status not found..')
    return False


def start(name, vmid=None, call=None):
    '''
    Start a node.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start mymachine
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The start action must be called with -a or --action.'
        )

    log.debug('Start: %s (%s) = Start', name, vmid)
    if not set_vm_status('start', name, vmid=vmid):
        log.error('Unable to bring VM %s (%s) up..', name, vmid)
        raise SaltCloudExecutionFailure

    # xxx: TBD: Check here whether the status was actually changed to 'started'

    return {'Started': '{0} was started.'.format(name)}


def stop(name, vmid=None, call=None):
    '''
    Stop a node ("pulling the plug").

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop mymachine
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The stop action must be called with -a or --action.'
        )

    if not set_vm_status('stop', name, vmid=vmid):
        log.error('Unable to bring VM %s (%s) down..', name, vmid)
        raise SaltCloudExecutionFailure

    # xxx: TBD: Check here whether the status was actually changed to 'stopped'

    return {'Stopped': '{0} was stopped.'.format(name)}


def shutdown(name=None, vmid=None, call=None):
    '''
    Shutdown a node via ACPI.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a shutdown mymachine
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The shutdown action must be called with -a or --action.'
        )

    if not set_vm_status('shutdown', name, vmid=vmid):
        log.error('Unable to shut VM %s (%s) down..', name, vmid)
        raise SaltCloudExecutionFailure

    # xxx: TBD: Check here whether the status was actually changed to 'stopped'

    return {'Shutdown': '{0} was shutdown.'.format(name)}